1 /*
2 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/castnode.hpp"
33 #include "opto/connode.hpp"
34 #include "opto/castnode.hpp"
35 #include "opto/divnode.hpp"
36 #include "opto/loopnode.hpp"
37 #include "opto/matcher.hpp"
38 #include "opto/mulnode.hpp"
39 #include "opto/movenode.hpp"
40 #include "opto/opaquenode.hpp"
41 #include "opto/rootnode.hpp"
42 #include "opto/subnode.hpp"
43 #include "opto/subtypenode.hpp"
44 #include "utilities/macros.hpp"
45
46 //=============================================================================
47 //------------------------------split_thru_phi---------------------------------
48 // Split Node 'n' through merge point if there is enough win.
// Split data node 'n' through merge point 'region' if there is enough win:
// clone 'n' once per region path, feed each clone the pre-Phi inputs, and
// merge the clones with a new Phi.  Returns the new Phi, or NULL if fewer
// than 'policy' clones simplified ("won").
Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
    // ConvI2L may have type information on it which is unsafe to push up
    // so disable this for now
    return NULL;
  }

  // Splitting range check CastIIs through a loop induction Phi can
  // cause new Phis to be created that are left unrelated to the loop
  // induction Phi and prevent optimizations (vectorization)
  if (n->Opcode() == Op_CastII && n->as_CastII()->has_range_check() &&
      region->is_CountedLoop() && n->in(1) == region->as_CountedLoop()->phi()) {
    return NULL;
  }

  // Bail out if 'n' is a Div or Mod node whose zero check was removed earlier (i.e. control is NULL) and its divisor is an induction variable
  // phi p of a trip-counted (integer) loop whose inputs could be zero (include zero in their type range). p could have a more precise type
  // range that does not necessarily include all values of its inputs. Since each of these inputs will be a divisor of the newly cloned nodes
  // of 'n', we need to bail out if one of these divisors could be zero (zero in its type range).
  if ((n->Opcode() == Op_DivI || n->Opcode() == Op_ModI) && n->in(0) == NULL
      && region->is_CountedLoop() && n->in(2) == region->as_CountedLoop()->phi()) {
    Node* phi = region->as_CountedLoop()->phi();
    for (uint i = 1; i < phi->req(); i++) {
      if (_igvn.type(phi->in(i))->filter_speculative(TypeInt::ZERO) != Type::TOP) {
        // Zero could be a possible value but we already removed the zero check. Bail out to avoid a possible division by zero at a later point.
        return NULL;
      }
    }
  }

  int wins = 0;
  assert(!n->is_CFG(), "");
  assert(region->is_Region(), "");

  const Type* type = n->bottom_type();
  const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  Node* phi;
  if (t_oop != NULL && t_oop->is_known_instance_field()) {
    // Known-instance field memory: preserve the instance id / alias index /
    // offset on the new Phi so the memory graph invariants hold.
    int iid = t_oop->instance_id();
    int index = C->get_alias_index(t_oop);
    int offset = t_oop->offset();
    phi = new PhiNode(region, type, NULL, iid, index, offset);
  } else {
    phi = PhiNode::make_blank(region, n);
  }
  // Remember the node-index watermark: anything at or above it was created
  // by this call and has no prior control/loop placement.
  uint old_unique = C->unique();
  for (uint i = 1; i < region->req(); i++) {
    Node* x;
    Node* the_clone = NULL;
    if (region->in(i) == C->top()) {
      x = C->top();             // Dead path?  Use a dead data op
    } else {
      x = n->clone();           // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (n->in(0) == region)
        x->set_req( 0, region->in(i) );
      for (uint j = 1; j < n->req(); j++) {
        Node* in = n->in(j);
        if (in->is_Phi() && in->in(0) == region)
          x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
      }
    }
    // Check for a 'win' on some paths
    const Type* t = x->Value(&_igvn);

    bool singleton = t->singleton();

    // A TOP singleton indicates that there are no possible values incoming
    // along a particular edge. In most cases, this is OK, and the Phi will
    // be eliminated later in an Ideal call. However, we can't allow this to
    // happen if the singleton occurs on loop entry, as the elimination of
    // the PhiNode may cause the resulting node to migrate back to a previous
    // loop iteration.
    if (singleton && t == Type::TOP) {
      // Is_Loop() == false does not confirm the absence of a loop (e.g., an
      // irreducible loop may not be indicated by an affirmative is_Loop());
      // therefore, the only top we can split thru a phi is on a backedge of
      // a loop.
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      // The clone folded to a constant on this path: count a win.
      wins++;
      x = ((PhaseGVN&)_igvn).makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (Note: This tweaking with igvn only works because x is a new node.)
      _igvn.set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node
      // otherwise it will be not updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      Node* y = x->Identity(&_igvn);
      if (y != x) {
        // Identity simplified the clone to an existing node: a win.
        wins++;
        x = y;
      } else {
        y = _igvn.hash_find(x);
        if (y) {
          // The clone is value-equal to an existing node: a win.
          wins++;
          x = y;
        } else {
          // Else x is a new node we are keeping
          // We do not need register_new_node_with_optimizer
          // because set_type has already been called.
          _igvn._worklist.push(x);
        }
      }
    }
    // Drop the clone if it was commoned/folded away above.
    if (x != the_clone && the_clone != NULL)
      _igvn.remove_dead_node(the_clone);
    phi->set_req( i, x );
  }
  // Too few wins?
  if (wins <= policy) {
    _igvn.remove_dead_node(phi);
    return NULL;
  }

  // Record Phi
  register_new_node( phi, region );

  // Place every Phi input at a control that dominates its new use.
  for (uint i2 = 1; i2 < phi->req(); i2++) {
    Node *x = phi->in(i2);
    // If we commoned up the cloned 'x' with another existing Node,
    // the existing Node picks up a new use.  We need to make the
    // existing Node occur higher up so it dominates its uses.
    Node *old_ctrl;
    IdealLoopTree *old_loop;

    if (x->is_Con()) {
      // Constant's control is always root.
      set_ctrl(x, C->root());
      continue;
    }
    // The occasional new node
    if (x->_idx >= old_unique) {     // Found a new, unplaced node?
      old_ctrl = NULL;
      old_loop = NULL;               // Not in any prior loop
    } else {
      old_ctrl = get_ctrl(x);
      old_loop = get_loop(old_ctrl); // Get prior loop
    }
    // New late point must dominate new use
    Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
    if (new_ctrl == old_ctrl) // Nothing is changed
      continue;

    IdealLoopTree *new_loop = get_loop(new_ctrl);

    // Don't move x into a loop if its uses are
    // outside of loop. Otherwise x will be cloned
    // for each use outside of this loop.
    IdealLoopTree *use_loop = get_loop(region);
    if (!new_loop->is_member(use_loop) &&
        (old_loop == NULL || !new_loop->is_member(old_loop))) {
      // Take early control, later control will be recalculated
      // during next iteration of loop optimizations.
      new_ctrl = get_early_ctrl(x);
      new_loop = get_loop(new_ctrl);
    }
    // Set new location
    set_ctrl(x, new_ctrl);
    // If changing loop bodies, see if we need to collect into new body
    if (old_loop != new_loop) {
      if (old_loop && !old_loop->_child)
        old_loop->_body.yank(x);
      if (!new_loop->_child)
        new_loop->_body.push(x); // Collect body info
    }
  }

  return phi;
}
227
228 //------------------------------dominated_by------------------------------------
229 // Replace the dominated test with an obvious true or false. Place it on the
230 // IGVN worklist for later cleanup. Move control-dependent data Nodes on the
231 // live path up to the dominating control.
// Kill the test 'iff' because it is dominated by an equivalent test whose
// taken projection is 'prevdom': force its condition to a constant so IGVN
// folds it, then re-home data nodes that were control-dependent on the
// surviving projection.  'flip' means the dominated test's sense is inverted
// relative to the dominating one; 'exclude_loop_predicate' keeps us from
// reordering checks that belong to loop/range-check predicates.
void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exclude_loop_predicate ) {
  if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }

  // prevdom is the dominating projection of the dominating test.
  assert( iff->is_If(), "" );
  assert(iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd || iff->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
  int pop = prevdom->Opcode();
  assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
  if (flip) {
    // Inverted sense: the live projection of 'iff' is the opposite one.
    if (pop == Op_IfTrue)
      pop = Op_IfFalse;
    else
      pop = Op_IfTrue;
  }
  // 'con' is set to true or false to kill the dominated test.
  Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
  set_ctrl(con, C->root()); // Constant gets a new use
  // Hack the dominated test
  _igvn.replace_input_of(iff, 1, con);

  // If I dont have a reachable TRUE and FALSE path following the IfNode then
  // I can assume this path reaches an infinite loop.  In this case it's not
  // important to optimize the data Nodes - either the whole compilation will
  // be tossed or this path (and all data Nodes) will go dead.
  if (iff->outcnt() != 2) return;

  // Make control-dependent data Nodes on the live path (path that will remain
  // once the dominated IF is removed) become control-dependent on the
  // dominating projection.
  Node* dp = iff->as_If()->proj_out_or_null(pop == Op_IfTrue);

  // Loop predicates may have depending checks which should not
  // be skipped. For example, range check predicate has two checks
  // for lower and upper bounds.
  if (dp == NULL)
    return;

  ProjNode* dp_proj  = dp->as_Proj();
  // The other (dead-path) projection; if it leads to a predicate/range-check
  // uncommon trap we must not reorder the dependent checks.
  ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
  if (exclude_loop_predicate &&
      (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL ||
       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL ||
       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != NULL)) {
    // If this is a range check (IfNode::is_range_check), do not
    // reorder because Compile::allow_range_check_smearing might have
    // changed the check.
    return; // Let IGVN transformation change control dependence.
  }

  IdealLoopTree *old_loop = get_loop(dp);

  for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
    Node* cd = dp->fast_out(i); // Control-dependent node
    if (cd->depends_only_on_test()) {
      assert(cd->in(0) == dp, "");
      // Rewire 'cd' onto the dominating projection and recompute its
      // placement in the loop tree.
      _igvn.replace_input_of(cd, 0, prevdom);
      set_early_ctrl(cd);
      IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
      if (old_loop != new_loop) {
        if (!old_loop->_child) old_loop->_body.yank(cd);
        if (!new_loop->_child) new_loop->_body.push(cd);
      }
      // 'dp' just lost an output; back up the DU iteration to stay in sync.
      --i;
      --imax;
    }
  }
}
299
300 //------------------------------has_local_phi_input----------------------------
301 // Return TRUE if 'n' has Phi inputs from its local block and no other
302 // block-local inputs (all non-local-phi inputs come from earlier blocks)
// Return n's control if 'n' has a Phi input from its own block and no other
// block-local inputs (all non-local-phi inputs come from earlier blocks);
// return NULL otherwise.  May hoist a purely non-local AddP input as a side
// effect (see below).
Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
  Node *n_ctrl = get_ctrl(n);
  // See if some inputs come from a Phi in this block, or from before
  // this block.
  uint i;
  for( i = 1; i < n->req(); i++ ) {
    Node *phi = n->in(i);
    if( phi->is_Phi() && phi->in(0) == n_ctrl )
      break;
  }
  if( i >= n->req() )
    return NULL;                // No Phi inputs; nowhere to clone thru

  // Check for inputs created between 'n' and the Phi input.  These
  // must split as well; they have already been given the chance
  // (courtesy of a post-order visit) and since they did not we must
  // recover the 'cost' of splitting them by being very profitable
  // when splitting 'n'.  Since this is unlikely we simply give up.
  for( i = 1; i < n->req(); i++ ) {
    Node *m = n->in(i);
    if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
      // We allow the special case of AddP's with no local inputs.
      // This allows us to split-up address expressions.
      if (m->is_AddP() &&
          get_ctrl(m->in(2)) != n_ctrl &&
          get_ctrl(m->in(3)) != n_ctrl) {
        // Move the AddP up to the dominating point so it is no longer
        // block-local to 'n'.
        Node* c = find_non_split_ctrl(idom(n_ctrl));
        if (c->is_OuterStripMinedLoop()) {
          // Don't pin the AddP on a strip-mined outer loop head; hoist it
          // above the outer loop via its entry control.
          c->as_Loop()->verify_strip_mined(1);
          c = c->in(LoopNode::EntryControl);
        }
        set_ctrl_and_loop(m, c);
        continue;
      }
      return NULL;              // Some other block-local input: give up.
    }
    assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
  }

  return n_ctrl;
}
345
346 //------------------------------remix_address_expressions----------------------
347 // Rework addressing expressions to get the most loop-invariant stuff
348 // moved out. We'd like to do all associative operators, but it's especially
349 // important (common) to do address expressions.
remix_address_expressions(Node * n)350 Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {
351 if (!has_ctrl(n)) return NULL;
352 Node *n_ctrl = get_ctrl(n);
353 IdealLoopTree *n_loop = get_loop(n_ctrl);
354
355 // See if 'n' mixes loop-varying and loop-invariant inputs and
356 // itself is loop-varying.
357
358 // Only interested in binary ops (and AddP)
359 if( n->req() < 3 || n->req() > 4 ) return NULL;
360
361 Node *n1_ctrl = get_ctrl(n->in( 1));
362 Node *n2_ctrl = get_ctrl(n->in( 2));
363 Node *n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
364 IdealLoopTree *n1_loop = get_loop( n1_ctrl );
365 IdealLoopTree *n2_loop = get_loop( n2_ctrl );
366 IdealLoopTree *n3_loop = get_loop( n3_ctrl );
367
368 // Does one of my inputs spin in a tighter loop than self?
369 if( (n_loop->is_member( n1_loop ) && n_loop != n1_loop) ||
370 (n_loop->is_member( n2_loop ) && n_loop != n2_loop) ||
371 (n_loop->is_member( n3_loop ) && n_loop != n3_loop) )
372 return NULL; // Leave well enough alone
373
374 // Is at least one of my inputs loop-invariant?
375 if( n1_loop == n_loop &&
376 n2_loop == n_loop &&
377 n3_loop == n_loop )
378 return NULL; // No loop-invariant inputs
379
380
381 int n_op = n->Opcode();
382
383 // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
384 if( n_op == Op_LShiftI ) {
385 // Scale is loop invariant
386 Node *scale = n->in(2);
387 Node *scale_ctrl = get_ctrl(scale);
388 IdealLoopTree *scale_loop = get_loop(scale_ctrl );
389 if( n_loop == scale_loop || !scale_loop->is_member( n_loop ) )
390 return NULL;
391 const TypeInt *scale_t = scale->bottom_type()->isa_int();
392 if( scale_t && scale_t->is_con() && scale_t->get_con() >= 16 )
393 return NULL; // Dont bother with byte/short masking
394 // Add must vary with loop (else shift would be loop-invariant)
395 Node *add = n->in(1);
396 Node *add_ctrl = get_ctrl(add);
397 IdealLoopTree *add_loop = get_loop(add_ctrl);
398 //assert( n_loop == add_loop, "" );
399 if( n_loop != add_loop ) return NULL; // happens w/ evil ZKM loops
400
401 // Convert I-V into I+ (0-V); same for V-I
402 if( add->Opcode() == Op_SubI &&
403 _igvn.type( add->in(1) ) != TypeInt::ZERO ) {
404 Node *zero = _igvn.intcon(0);
405 set_ctrl(zero, C->root());
406 Node *neg = new SubINode( _igvn.intcon(0), add->in(2) );
407 register_new_node( neg, get_ctrl(add->in(2) ) );
408 add = new AddINode( add->in(1), neg );
409 register_new_node( add, add_ctrl );
410 }
411 if( add->Opcode() != Op_AddI ) return NULL;
412 // See if one add input is loop invariant
413 Node *add_var = add->in(1);
414 Node *add_var_ctrl = get_ctrl(add_var);
415 IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
416 Node *add_invar = add->in(2);
417 Node *add_invar_ctrl = get_ctrl(add_invar);
418 IdealLoopTree *add_invar_loop = get_loop(add_invar_ctrl );
419 if( add_var_loop == n_loop ) {
420 } else if( add_invar_loop == n_loop ) {
421 // Swap to find the invariant part
422 add_invar = add_var;
423 add_invar_ctrl = add_var_ctrl;
424 add_invar_loop = add_var_loop;
425 add_var = add->in(2);
426 Node *add_var_ctrl = get_ctrl(add_var);
427 IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
428 } else // Else neither input is loop invariant
429 return NULL;
430 if( n_loop == add_invar_loop || !add_invar_loop->is_member( n_loop ) )
431 return NULL; // No invariant part of the add?
432
433 // Yes! Reshape address expression!
434 Node *inv_scale = new LShiftINode( add_invar, scale );
435 Node *inv_scale_ctrl =
436 dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
437 add_invar_ctrl : scale_ctrl;
438 register_new_node( inv_scale, inv_scale_ctrl );
439 Node *var_scale = new LShiftINode( add_var, scale );
440 register_new_node( var_scale, n_ctrl );
441 Node *var_add = new AddINode( var_scale, inv_scale );
442 register_new_node( var_add, n_ctrl );
443 _igvn.replace_node( n, var_add );
444 return var_add;
445 }
446
447 // Replace (I+V) with (V+I)
448 if( n_op == Op_AddI ||
449 n_op == Op_AddL ||
450 n_op == Op_AddF ||
451 n_op == Op_AddD ||
452 n_op == Op_MulI ||
453 n_op == Op_MulL ||
454 n_op == Op_MulF ||
455 n_op == Op_MulD ) {
456 if( n2_loop == n_loop ) {
457 assert( n1_loop != n_loop, "" );
458 n->swap_edges(1, 2);
459 }
460 }
461
462 // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
463 // but not if I2 is a constant.
464 if( n_op == Op_AddP ) {
465 if( n2_loop == n_loop && n3_loop != n_loop ) {
466 if( n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con() ) {
467 Node *n22_ctrl = get_ctrl(n->in(2)->in(2));
468 Node *n23_ctrl = get_ctrl(n->in(2)->in(3));
469 IdealLoopTree *n22loop = get_loop( n22_ctrl );
470 IdealLoopTree *n23_loop = get_loop( n23_ctrl );
471 if( n22loop != n_loop && n22loop->is_member(n_loop) &&
472 n23_loop == n_loop ) {
473 Node *add1 = new AddPNode( n->in(1), n->in(2)->in(2), n->in(3) );
474 // Stuff new AddP in the loop preheader
475 register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
476 Node *add2 = new AddPNode( n->in(1), add1, n->in(2)->in(3) );
477 register_new_node( add2, n_ctrl );
478 _igvn.replace_node( n, add2 );
479 return add2;
480 }
481 }
482 }
483
484 // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
485 if (n2_loop != n_loop && n3_loop == n_loop) {
486 if (n->in(3)->Opcode() == Op_AddX) {
487 Node *V = n->in(3)->in(1);
488 Node *I = n->in(3)->in(2);
489 if (is_member(n_loop,get_ctrl(V))) {
490 } else {
491 Node *tmp = V; V = I; I = tmp;
492 }
493 if (!is_member(n_loop,get_ctrl(I))) {
494 Node *add1 = new AddPNode(n->in(1), n->in(2), I);
495 // Stuff new AddP in the loop preheader
496 register_new_node(add1, n_loop->_head->in(LoopNode::EntryControl));
497 Node *add2 = new AddPNode(n->in(1), add1, V);
498 register_new_node(add2, n_ctrl);
499 _igvn.replace_node(n, add2);
500 return add2;
501 }
502 }
503 }
504 }
505
506 return NULL;
507 }
508
509 // Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
convert_add_to_muladd(Node * n)510 Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
511 assert(n->Opcode() == Op_AddI, "sanity");
512 Node * nn = NULL;
513 Node * in1 = n->in(1);
514 Node * in2 = n->in(2);
515 if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
516 IdealLoopTree* loop_n = get_loop(get_ctrl(n));
517 if (loop_n->is_counted() &&
518 loop_n->_head->as_Loop()->is_valid_counted_loop() &&
519 Matcher::match_rule_supported(Op_MulAddVS2VI) &&
520 Matcher::match_rule_supported(Op_MulAddS2I)) {
521 Node* mul_in1 = in1->in(1);
522 Node* mul_in2 = in1->in(2);
523 Node* mul_in3 = in2->in(1);
524 Node* mul_in4 = in2->in(2);
525 if (mul_in1->Opcode() == Op_LoadS &&
526 mul_in2->Opcode() == Op_LoadS &&
527 mul_in3->Opcode() == Op_LoadS &&
528 mul_in4->Opcode() == Op_LoadS) {
529 IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
530 IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
531 IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
532 IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
533 IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
534 IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
535 // All nodes should be in the same counted loop.
536 if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
537 loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
538 Node* adr1 = mul_in1->in(MemNode::Address);
539 Node* adr2 = mul_in2->in(MemNode::Address);
540 Node* adr3 = mul_in3->in(MemNode::Address);
541 Node* adr4 = mul_in4->in(MemNode::Address);
542 if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
543 if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
544 (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
545 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
546 register_new_node(nn, get_ctrl(n));
547 _igvn.replace_node(n, nn);
548 return nn;
549 } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
550 (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
551 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
552 register_new_node(nn, get_ctrl(n));
553 _igvn.replace_node(n, nn);
554 return nn;
555 }
556 }
557 }
558 }
559 }
560 }
561 return nn;
562 }
563
564 //------------------------------conditional_move-------------------------------
565 // Attempt to replace a Phi with a conditional move. We have some pretty
566 // strict profitability requirements. All Phis at the merge point must
567 // be converted, so we can remove the control flow. We need to limit the
568 // number of c-moves to a small handful. All code that was in the side-arms
569 // of the CFG diamond is now speculatively executed. This code has to be
570 // "cheap enough". We are pretty much limited to CFG diamonds that merge
571 // 1 or 2 items with a total of 1 or 2 ops executed speculatively.
// Attempt to replace every Phi merged at 'region' (a 2-input CFG diamond)
// with a CMove, so the diamond's control flow can be removed.  Applies a
// cost model first; returns the test's Bool on success, NULL if conversion
// is rejected.
Node *PhaseIdealLoop::conditional_move( Node *region ) {

  assert(region->is_Region(), "sanity check");
  if (region->req() != 3) return NULL;

  // Check for CFG diamond
  Node *lp = region->in(1);
  Node *rp = region->in(2);
  if (!lp || !rp) return NULL;
  Node *lp_c = lp->in(0);
  if (lp_c == NULL || lp_c != rp->in(0) || !lp_c->is_If()) return NULL;
  IfNode *iff = lp_c->as_If();

  // Check for ops pinned in an arm of the diamond.
  // Can't remove the control flow in this case
  if (lp->outcnt() > 1) return NULL;
  if (rp->outcnt() > 1) return NULL;

  IdealLoopTree* r_loop = get_loop(region);
  assert(r_loop == get_loop(iff), "sanity");
  // Always convert to CMOVE if all results are used only outside this loop.
  bool used_inside_loop = (r_loop == _ltree_root);

  // Check profitability
  int cost = 0;
  int phis = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node *out = region->fast_out(i);
    if (!out->is_Phi()) continue; // Ignore other control edges, etc
    phis++;
    PhiNode* phi = out->as_Phi();
    BasicType bt = phi->type()->basic_type();
    switch (bt) {
    case T_DOUBLE:
    case T_FLOAT:
      if (C->use_cmove()) {
        continue; //TODO: maybe we want to add some cost
      }
      cost += Matcher::float_cmove_cost(); // Could be very expensive
      break;
    case T_LONG: {
      cost += Matcher::long_cmove_cost(); // May encodes as 2 CMOV's
    }
      // Intentional fall-through: a long CMOV also pays the base cost below.
    case T_INT:                 // These all CMOV fine
    case T_ADDRESS: {           // (RawPtr)
      cost++;
      break;
    }
    case T_NARROWOOP: // Fall through
    case T_OBJECT: {            // Base oops are OK, but not derived oops
      const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
      // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
      // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
      // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
      // have a Phi for the base here that we convert to a CMOVE all is well
      // and good.  But if the base is dead, we'll not make a CMOVE.  Later
      // the allocator will have to produce a base by creating a CMOVE of the
      // relevant bases.  This puts the allocator in the business of
      // manufacturing expensive instructions, generally a bad plan.
      // Just Say No to Conditionally-Moved Derived Pointers.
      if (tp && tp->offset() != 0)
        return NULL;
      cost++;
      break;
    }
    default:
      return NULL;              // In particular, can't do memory or I/O
    }
    // Add in cost any speculative ops
    for (uint j = 1; j < region->req(); j++) {
      Node *proj = region->in(j);
      Node *inp = phi->in(j);
      if (get_ctrl(inp) == proj) { // Found local op
        cost++;
        // Check for a chain of dependent ops; these will all become
        // speculative in a CMOV.
        for (uint k = 1; k < inp->req(); k++)
          if (get_ctrl(inp->in(k)) == proj)
            cost += ConditionalMoveLimit; // Too much speculative goo
      }
    }
    // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
    // This will likely Split-If, a higher-payoff operation.
    for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
      Node* use = phi->fast_out(k);
      if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
        cost += ConditionalMoveLimit;
      // Is there a use inside the loop?
      // Note: check only basic types since CMoveP is pinned.
      if (!used_inside_loop && is_java_primitive(bt)) {
        IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
        if (r_loop == u_loop || r_loop->is_member(u_loop)) {
          used_inside_loop = true;
        }
      }
    }
  }//for
  Node* bol = iff->in(1);
  if (bol->Opcode() == Op_Opaque4) {
    return NULL; // Ignore loop predicate checks (the Opaque4 ensures they will go away)
  }
  assert(bol->Opcode() == Op_Bool, "Unexpected node");
  int cmp_op = bol->in(1)->Opcode();
  if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
    return NULL;
  }
  // It is expensive to generate flags from a float compare.
  // Avoid duplicated float compare.
  if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL;

  float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
  if (used_inside_loop) {
    if (cost >= ConditionalMoveLimit) return NULL; // Too much goo

    // BlockLayoutByFrequency optimization moves infrequent branch
    // from hot path. No point in CMOV'ing in such case (110 is used
    // instead of 100 to take into account not exactness of float value).
    if (BlockLayoutByFrequency) {
      infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
    }
  }
  // Check for highly predictable branch.  No point in CMOV'ing if
  // we are going to predict accurately all the time.
  if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
    //keep going
  } else if (iff->_prob < infrequent_prob ||
             iff->_prob > (1.0f - infrequent_prob))
    return NULL;

  // --------------
  // Now replace all Phis with CMOV's
  Node *cmov_ctrl = iff->in(0);
  uint flip = (lp->Opcode() == Op_IfTrue);
  Node_List wq;
  while (1) {
    // Pick off one remaining Phi per iteration; replace_node below removes
    // it from the region's outputs, so this loop terminates.
    PhiNode* phi = NULL;
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node *out = region->fast_out(i);
      if (out->is_Phi()) {
        phi = out->as_Phi();
        break;
      }
    }
    if (phi == NULL) break;
    if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
    // Move speculative ops: hoist everything reachable from the Phi that is
    // not already above the test up to the test's control.
    wq.push(phi);
    while (wq.size() > 0) {
      Node *n = wq.pop();
      for (uint j = 1; j < n->req(); j++) {
        Node* m = n->in(j);
        if (m != NULL && !is_dominator(get_ctrl(m), cmov_ctrl)) {
#ifndef PRODUCT
          if (PrintOpto && VerifyLoopOptimizations) {
            tty->print(" speculate: ");
            m->dump();
          }
#endif
          set_ctrl(m, cmov_ctrl);
          wq.push(m);
        }
      }
    }
    // 'flip' selects which Phi input is the "true" value of the CMove.
    Node *cmov = CMoveNode::make(cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
    register_new_node( cmov, cmov_ctrl );
    _igvn.replace_node( phi, cmov );
#ifndef PRODUCT
    if (TraceLoopOpts) {
      tty->print("CMOV ");
      r_loop->dump_head();
      if (Verbose) {
        bol->in(1)->dump(1);
        cmov->dump(1);
      }
    }
    if (VerifyLoopOptimizations) verify();
#endif
  }

  // The useless CFG diamond will fold up later; see the optimization in
  // RegionNode::Ideal.
  _igvn._worklist.push(region);

  return iff->in(1);
}
758
enqueue_cfg_uses(Node * m,Unique_Node_List & wq)759 static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
760 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
761 Node* u = m->fast_out(i);
762 if (u->is_CFG()) {
763 if (u->Opcode() == Op_NeverBranch) {
764 u = ((NeverBranchNode*)u)->proj_out(0);
765 enqueue_cfg_uses(u, wq);
766 } else {
767 wq.push(u);
768 }
769 }
770 }
771 }
772
773 // Try moving a store out of a loop, right before the loop
// Try to move store 'n' (with control 'n_ctrl') out of its loop, to right
// before the loop.  Returns 'n' on success, NULL if the move is not safe.
Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
  // Store has to be first in the loop body
  IdealLoopTree *n_loop = get_loop(n_ctrl);
  if (n->is_Store() && n_loop != _ltree_root &&
      n_loop->is_loop() && n_loop->_head->is_Loop() &&
      n->in(0) != NULL) {
    Node* address = n->in(MemNode::Address);
    Node* value = n->in(MemNode::ValueIn);
    Node* mem = n->in(MemNode::Memory);
    IdealLoopTree* address_loop = get_loop(get_ctrl(address));
    IdealLoopTree* value_loop = get_loop(get_ctrl(value));

    // - address and value must be loop invariant
    // - memory must be a memory Phi for the loop
    // - Store must be the only store on this memory slice in the
    // loop: if there's another store following this one then value
    // written at iteration i by the second store could be overwritten
    // at iteration i+n by the first store: it's not safe to move the
    // first store out of the loop
    // - nothing must observe the memory Phi: it guarantees no read
    // before the store, we are also guaranteed the store post
    // dominates the loop head (ignoring a possible early
    // exit). Otherwise there would be extra Phi involved between the
    // loop's Phi and the store.
    // - there must be no early exit from the loop before the Store
    // (such an exit most of the time would be an extra use of the
    // memory Phi but sometimes is a bottom memory Phi that takes the
    // store as input).

    if (!n_loop->is_member(address_loop) &&
        !n_loop->is_member(value_loop) &&
        mem->is_Phi() && mem->in(0) == n_loop->_head &&
        mem->outcnt() == 1 &&
        mem->in(LoopNode::LoopBackControl) == n) {

      assert(n_loop->_tail != NULL, "need a tail");
      assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");

      // Verify that there's no early exit of the loop before the store.
      bool ctrl_ok = false;
      {
        // Follow control from loop head until n, we exit the loop or
        // we reach the tail
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(n_loop->_head);

        for (uint next = 0; next < wq.size(); ++next) {
          Node *m = wq.at(next);
          if (m == n->in(0)) {
            // Reached the store's control without leaving the loop: this
            // path is fine; don't walk past the store.
            ctrl_ok = true;
            continue;
          }
          assert(!has_ctrl(m), "should be CFG");
          if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
            // Left the loop (early exit) or reached the tail before
            // reaching the store: not safe to move.
            ctrl_ok = false;
            break;
          }
          enqueue_cfg_uses(m, wq);
          if (wq.size() > 10) {
            // Control flow too complex: give up.
            ctrl_ok = false;
            break;
          }
        }
      }
      if (ctrl_ok) {
        // move the Store
        // Make the memory Phi trivially dead by pointing its back-edge
        // input at itself before replacing it below.
        _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
        // Pin the store on the (strip-mined-aware) loop entry control and
        // feed it the loop's incoming memory state.
        _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
        _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
        // Disconnect the phi now. An empty phi can confuse other
        // optimizations in this pass of loop opts.
        _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
        n_loop->_body.yank(mem);

        set_ctrl_and_loop(n, n->in(0));

        return n;
      }
    }
  }
  return NULL;
}
857
// Try moving a store out of a loop, right after the loop: legal when the
// address is loop invariant, the store is the last store on its memory slice
// in the loop, and nothing in the loop observes the stored value before the
// next iteration's store overwrites it.
void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
  if (n->is_Store() && n->in(0) != NULL) {
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    // Store must be in a loop
    if (n_loop != _ltree_root && !n_loop->_irreducible) {
      Node* address = n->in(MemNode::Address);
      Node* value = n->in(MemNode::ValueIn);
      IdealLoopTree* address_loop = get_loop(get_ctrl(address));
      // address must be loop invariant
      if (!n_loop->is_member(address_loop)) {
        // Store must be last on this memory slice in the loop and
        // nothing in the loop must observe it
        Node* phi = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node* u = n->fast_out(i);
          if (has_ctrl(u)) { // control use?
            IdealLoopTree *u_loop = get_loop(get_ctrl(u));
            if (!n_loop->is_member(u_loop)) {
              continue; // use outside the loop is harmless
            }
            if (u->is_Phi() && u->in(0) == n_loop->_head) {
              assert(_igvn.type(u) == Type::MEMORY, "bad phi");
              // multiple phis on the same slice are possible
              if (phi != NULL) {
                return;
              }
              phi = u;
              continue;
            }
          }
          return; // some other use in the loop observes the store
        }
        if (phi != NULL) {
          // Nothing in the loop before the store (next iteration)
          // must observe the stored value
          bool mem_ok = true;
          {
            ResourceMark rm;
            Unique_Node_List wq;
            wq.push(phi);
            // Walk memory uses transitively from the loop Phi: only other
            // stores and phis (which overwrite/merge rather than read the
            // value) are acceptable; anything else is an observer.
            for (uint next = 0; next < wq.size() && mem_ok; ++next) {
              Node *m = wq.at(next);
              for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
                Node* u = m->fast_out(i);
                if (u->is_Store() || u->is_Phi()) {
                  if (u != n) {
                    wq.push(u);
                    mem_ok = (wq.size() <= 10); // bound the search
                  }
                } else {
                  mem_ok = false;
                  break;
                }
              }
            }
          }
          if (mem_ok) {
            // Move the store out of the loop if the LCA of all
            // users (except for the phi) is outside the loop.
            Node* hook = new Node(1);
            hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
            _igvn.rehash_node_delayed(phi);
            // Temporarily replace the phi's use of n by the hook so the
            // phi does not constrain get_late_ctrl() below.
            int count = phi->replace_edge(n, hook);
            assert(count > 0, "inconsistent phi");

            // Compute latest point this store can go
            Node* lca = get_late_ctrl(n, get_ctrl(n));
            if (lca->is_OuterStripMinedLoop()) {
              lca = lca->in(LoopNode::EntryControl);
            }
            if (n_loop->is_member(get_loop(lca))) {
              // LCA is in the loop - bail out
              _igvn.replace_node(hook, n); // undo the temporary edge swap
              return;
            }
#ifdef ASSERT
            if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
              assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
              n_loop->_head->as_Loop()->verify_strip_mined(1);
              Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
              IdealLoopTree* outer_loop = get_loop(outer);
              assert(n_loop->_parent == outer_loop, "broken loop tree");
              assert(get_loop(lca) == outer_loop, "safepoint in outer loop consume all memory state");
            }
#endif

            // Move store out of the loop
            _igvn.replace_node(hook, n->in(MemNode::Memory));
            _igvn.replace_input_of(n, 0, lca);
            set_ctrl_and_loop(n, lca);

            // Disconnect the phi now. An empty phi can confuse other
            // optimizations in this pass of loop opts..
            if (phi->in(LoopNode::LoopBackControl) == phi) {
              _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
              n_loop->_body.yank(phi);
            }
          }
        }
      }
    }
  }
}
963
//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function.  Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
// Returns the node to continue the traversal from: n itself, a conditional
// move that replaced a Region, or the Phi that n was split through.
Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if (n_op == Op_MergeMem) {
    return n;
  }
  if (n->is_Proj()) {
    return n;
  }
  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if (n->is_Cmp()) {
    return n;
  }
  // Attempt to use a conditional move instead of a phi/branch
  if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
    Node *cmov = conditional_move( n );
    if (cmov) {
      return cmov;
    }
  }
  if (n->is_CFG() || n->is_LoadStore()) {
    return n;
  }
  if (n->is_Opaque1() ||     // Opaque nodes cannot be mod'd
      n_op == Op_Opaque2) {
    if (!C->major_progress()) {   // If chance of no more loop opts...
      _igvn._worklist.push(n);  // maybe we'll remove them
    }
    return n;
  }

  if (n->is_Con()) {
    return n;   // No cloning for Con nodes
  }

  Node *n_ctrl = get_ctrl(n);
  if (!n_ctrl) {
    return n;       // Dead node
  }

  Node* res = try_move_store_before_loop(n, n_ctrl);
  if (res != NULL) {
    // The store was hoisted before the loop; nothing more to do for it.
    return n;
  }

  // Attempt to remix address expressions for loop invariants
  Node *m = remix_address_expressions( n );
  if( m ) return m;

  if (n_op == Op_AddI) {
    // Try to transform (x + y) << s style shapes into a MulAdd.
    Node *nn = convert_add_to_muladd( n );
    if ( nn ) return nn;
  }

  if (n->is_ConstraintCast()) {
    Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
    // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
    // Node control inputs don't necessarily agree with loop control info (due to
    // transformations happened in between), thus additional dominance check is needed
    // to keep loop info valid.
    if (dom_cast != NULL && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
      _igvn.replace_node(n, dom_cast);
      return dom_cast;
    }
  }

  // Determine if the Node has inputs from some local Phi.
  // Returns the block to clone thru.
  Node *n_blk = has_local_phi_input( n );
  if( !n_blk ) return n;

  // Do not clone the trip counter through on a CountedLoop
  // (messes up the canonical shape).
  if( n_blk->is_CountedLoop() && n->Opcode() == Op_AddI ) return n;

  // Check for having no control input; not pinned.  Allow
  // dominating control.
  if (n->in(0)) {
    Node *dom = idom(n_blk);
    if (dom_lca(n->in(0), dom) != n->in(0)) {
      return n;
    }
  }
  // Policy: when is it profitable.  You must get more wins than
  // policy before it is considered profitable.  Policy is usually 0,
  // so 1 win is considered profitable.  Big merges will require big
  // cloning, so get a larger policy.
  int policy = n_blk->req() >> 2;

  // If the loop is a candidate for range check elimination,
  // delay splitting through it's phi until a later loop optimization
  if (n_blk->is_CountedLoop()) {
    IdealLoopTree *lp = get_loop(n_blk);
    if (lp && lp->_rce_candidate) {
      return n;
    }
  }

  if (must_throttle_split_if()) return n;

  // Split 'n' through the merge point if it is profitable
  Node *phi = split_thru_phi( n, n_blk, policy );
  if (!phi) return n;

  // Found a Phi to split thru!
  // Replace 'n' with the new phi
  _igvn.replace_node( n, phi );
  // Moved a load around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_Load() &&
      !phi->in(LoopNode::LoopBackControl)->is_Load())
    C->set_major_progress();

  return phi;
}
1082
merge_point_too_heavy(Compile * C,Node * region)1083 static bool merge_point_too_heavy(Compile* C, Node* region) {
1084 // Bail out if the region and its phis have too many users.
1085 int weight = 0;
1086 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1087 weight += region->fast_out(i)->outcnt();
1088 }
1089 int nodes_left = C->max_node_limit() - C->live_nodes();
1090 if (weight * 8 > nodes_left) {
1091 if (PrintOpto) {
1092 tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
1093 }
1094 return true;
1095 } else {
1096 return false;
1097 }
1098 }
1099
merge_point_safe(Node * region)1100 static bool merge_point_safe(Node* region) {
1101 // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1102 // having a PhiNode input. This sidesteps the dangerous case where the split
1103 // ConvI2LNode may become TOP if the input Value() does not
1104 // overlap the ConvI2L range, leaving a node which may not dominate its
1105 // uses.
1106 // A better fix for this problem can be found in the BugTraq entry, but
1107 // expediency for Mantis demands this hack.
1108 #ifdef _LP64
1109 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1110 Node* n = region->fast_out(i);
1111 if (n->is_Phi()) {
1112 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1113 Node* m = n->fast_out(j);
1114 if (m->Opcode() == Op_ConvI2L)
1115 return false;
1116 if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
1117 return false;
1118 }
1119 }
1120 }
1121 }
1122 #endif
1123 return true;
1124 }
1125
1126
1127 //------------------------------place_near_use---------------------------------
1128 // Place some computation next to use but not inside inner loops.
1129 // For inner loop uses move it to the preheader area.
place_near_use(Node * useblock) const1130 Node *PhaseIdealLoop::place_near_use(Node *useblock) const {
1131 IdealLoopTree *u_loop = get_loop( useblock );
1132 if (u_loop->_irreducible) {
1133 return useblock;
1134 }
1135 if (u_loop->_child) {
1136 if (useblock == u_loop->_head && u_loop->_head->is_OuterStripMinedLoop()) {
1137 return u_loop->_head->in(LoopNode::EntryControl);
1138 }
1139 return useblock;
1140 }
1141 return u_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
1142 }
1143
1144
identical_backtoback_ifs(Node * n)1145 bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
1146 if (!n->is_If() || n->is_CountedLoopEnd()) {
1147 return false;
1148 }
1149 if (!n->in(0)->is_Region()) {
1150 return false;
1151 }
1152 Node* region = n->in(0);
1153 Node* dom = idom(region);
1154 if (!dom->is_If() || dom->in(1) != n->in(1)) {
1155 return false;
1156 }
1157 IfNode* dom_if = dom->as_If();
1158 Node* proj_true = dom_if->proj_out(1);
1159 Node* proj_false = dom_if->proj_out(0);
1160
1161 for (uint i = 1; i < region->req(); i++) {
1162 if (is_dominator(proj_true, region->in(i))) {
1163 continue;
1164 }
1165 if (is_dominator(proj_false, region->in(i))) {
1166 continue;
1167 }
1168 return false;
1169 }
1170
1171 return true;
1172 }
1173
1174
can_split_if(Node * n_ctrl)1175 bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
1176 if (must_throttle_split_if()) {
1177 return false;
1178 }
1179
1180 // Do not do 'split-if' if irreducible loops are present.
1181 if (_has_irreducible_loops) {
1182 return false;
1183 }
1184
1185 if (merge_point_too_heavy(C, n_ctrl)) {
1186 return false;
1187 }
1188
1189 // Do not do 'split-if' if some paths are dead. First do dead code
1190 // elimination and then see if its still profitable.
1191 for (uint i = 1; i < n_ctrl->req(); i++) {
1192 if (n_ctrl->in(i) == C->top()) {
1193 return false;
1194 }
1195 }
1196
1197 // If trying to do a 'Split-If' at the loop head, it is only
1198 // profitable if the cmp folds up on BOTH paths. Otherwise we
1199 // risk peeling a loop forever.
1200
1201 // CNC - Disabled for now. Requires careful handling of loop
1202 // body selection for the cloned code. Also, make sure we check
1203 // for any input path not being in the same loop as n_ctrl. For
1204 // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1205 // because the alternative loop entry points won't be converted
1206 // into LoopNodes.
1207 IdealLoopTree *n_loop = get_loop(n_ctrl);
1208 for (uint j = 1; j < n_ctrl->req(); j++) {
1209 if (get_loop(n_ctrl->in(j)) != n_loop) {
1210 return false;
1211 }
1212 }
1213
1214 // Check for safety of the merge point.
1215 if (!merge_point_safe(n_ctrl)) {
1216 return false;
1217 }
1218
1219 return true;
1220 }
1221
1222 // Detect if the node is the inner strip-mined loop
1223 // Return: NULL if it's not the case, or the exit of outer strip-mined loop
is_inner_of_stripmined_loop(const Node * out)1224 static Node* is_inner_of_stripmined_loop(const Node* out) {
1225 Node* out_le = NULL;
1226
1227 if (out->is_CountedLoopEnd()) {
1228 const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1229
1230 if (loop != NULL && loop->is_strip_mined()) {
1231 out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1232 }
1233 }
1234
1235 return out_le;
1236 }
1237
//------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function.  CFG hackery wants to be
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
// info.
// Handles, in order: split-if on a Cmp feeding an If/CMove; merging two
// identical back-to-back Ifs; removing an If dominated by the same test;
// cloning a loop-varying computation with only loop-invariant uses so it
// can sink; moving stores after the loop; and cleaning up stale Opaque2s.
void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {

  // Cloning Cmp through Phi's involves the split-if transform.
  // FastLock is not used by an If
  if (n->is_Cmp() && !n->is_FastLock()) {
    Node *n_ctrl = get_ctrl(n);
    // Determine if the Node has inputs from some local Phi.
    // Returns the block to clone thru.
    Node *n_blk = has_local_phi_input(n);
    if (n_blk != n_ctrl) {
      return;
    }

    if (!can_split_if(n_ctrl)) {
      return;
    }

    if (n->outcnt() != 1) {
      return; // Multiple bool's from 1 compare?
    }
    Node *bol = n->unique_out();
    assert(bol->is_Bool(), "expect a bool here");
    if (bol->outcnt() != 1) {
      return;// Multiple branches from 1 compare?
    }
    Node *iff = bol->unique_out();

    // Check some safety conditions
    if (iff->is_If()) {        // Classic split-if?
      if (iff->in(0) != n_ctrl) {
        return; // Compare must be in same blk as if
      }
    } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
      // Can't split CMove with different control edge.
      if (iff->in(0) != NULL && iff->in(0) != n_ctrl ) {
        return;
      }
      if (get_ctrl(iff->in(2)) == n_ctrl ||
          get_ctrl(iff->in(3)) == n_ctrl) {
        return;             // Inputs not yet split-up
      }
      if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
        return;             // Loop-invar test gates loop-varying CMOVE
      }
    } else {
      return;  // some other kind of node, such as an Allocate
    }

    // When is split-if profitable?  Every 'win' on means some control flow
    // goes dead, so it's almost always a win.
    int policy = 0;
    // Split compare 'n' through the merge point if it is profitable
    Node *phi = split_thru_phi( n, n_ctrl, policy);
    if (!phi) {
      return;
    }

    // Found a Phi to split thru!
    // Replace 'n' with the new phi
    _igvn.replace_node(n, phi);

    // Now split the bool up thru the phi
    Node *bolphi = split_thru_phi(bol, n_ctrl, -1);
    guarantee(bolphi != NULL, "null boolean phi node");

    _igvn.replace_node(bol, bolphi);
    assert(iff->in(1) == bolphi, "");

    // If the split bool folded to a constant, IGVN will kill the If;
    // nothing more to do here.
    if (bolphi->Value(&_igvn)->singleton()) {
      return;
    }

    // Conditional-move?  Must split up now
    if (!iff->is_If()) {
      Node *cmovphi = split_thru_phi(iff, n_ctrl, -1);
      _igvn.replace_node(iff, cmovphi);
      return;
    }

    // Now split the IF
    do_split_if(iff);
    return;
  }

  // Two identical ifs back to back can be merged
  if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
    Node *n_ctrl = n->in(0);
    // Replace n's condition with a Phi of constants: every region input is
    // known to come from either the true or the false projection of the
    // dominating If, so the test outcome is already decided per path.
    PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
    IfNode* dom_if = idom(n_ctrl)->as_If();
    Node* proj_true = dom_if->proj_out(1);
    Node* proj_false = dom_if->proj_out(0);
    Node* con_true = _igvn.makecon(TypeInt::ONE);
    Node* con_false = _igvn.makecon(TypeInt::ZERO);

    for (uint i = 1; i < n_ctrl->req(); i++) {
      if (is_dominator(proj_true, n_ctrl->in(i))) {
        bolphi->init_req(i, con_true);
      } else {
        assert(is_dominator(proj_false, n_ctrl->in(i)), "bad if");
        bolphi->init_req(i, con_false);
      }
    }
    register_new_node(bolphi, n_ctrl);
    _igvn.replace_input_of(n, 1, bolphi);

    // Now split the IF
    do_split_if(n);
    return;
  }

  // Check for an IF ready to split; one that has its
  // condition codes input coming from a Phi at the block start.
  int n_op = n->Opcode();

  // Check for an IF being dominated by another IF same test
  if (n_op == Op_If ||
      n_op == Op_RangeCheck) {
    Node *bol = n->in(1);
    uint max = bol->outcnt();
    // Check for same test used more than once?
    if (max > 1 && bol->is_Bool()) {
      // Search up IDOMs to see if this IF is dominated.
      Node *cutoff = get_ctrl(bol);

      // Now search up IDOMs till cutoff, looking for a dominating test
      Node *prevdom = n;
      Node *dom = idom(prevdom);
      while (dom != cutoff) {
        if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
          // It's invalid to move control dependent data nodes in the inner
          // strip-mined loop, because:
          // 1) break validation of LoopNode::verify_strip_mined()
          // 2) move code with side-effect in strip-mined loop
          // Move to the exit of outer strip-mined loop in that case.
          Node* out_le = is_inner_of_stripmined_loop(dom);
          if (out_le != NULL) {
            prevdom = out_le;
          }
          // Replace the dominated test with an obvious true or false.
          // Place it on the IGVN worklist for later cleanup.
          C->set_major_progress();
          dominated_by(prevdom, n, false, true);
#ifndef PRODUCT
          if( VerifyLoopOptimizations ) verify();
#endif
          return;
        }
        prevdom = dom;
        dom = idom(prevdom);
      }
    }
  }

  // See if a shared loop-varying computation has no loop-varying uses.
  // Happens if something is only used for JVM state in uncommon trap exits,
  // like various versions of induction variable+offset.  Clone the
  // computation per usage to allow it to sink out of the loop.
  if (has_ctrl(n) && !n->in(0)) {// n not dead and has no control edge (can float about)
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    if( n_loop != _ltree_root ) {
      // Scan the uses: bail at the first use that keeps n in this loop.
      DUIterator_Fast imax, i = n->fast_outs(imax);
      for (; i < imax; i++) {
        Node* u = n->fast_out(i);
        if( !has_ctrl(u) )     break; // Found control user
        IdealLoopTree *u_loop = get_loop(get_ctrl(u));
        if( u_loop == n_loop ) break; // Found loop-varying use
        if( n_loop->is_member( u_loop ) ) break; // Found use in inner loop
        if( u->Opcode() == Op_Opaque1 ) break; // Found loop limit, bugfix for 4677003
      }
      bool did_break = (i < imax);  // Did we break out of the previous loop?
      if (!did_break && n->outcnt() > 1) { // All uses in outer loops!
        Node *late_load_ctrl = NULL;
        if (n->is_Load()) {
          // If n is a load, get and save the result from get_late_ctrl(),
          // to be later used in calculating the control for n's clones.
          clear_dom_lca_tags();
          late_load_ctrl = get_late_ctrl(n, n_ctrl);
        }
        // If n is a load, and the late control is the same as the current
        // control, then the cloning of n is a pointless exercise, because
        // GVN will ensure that we end up where we started.
        if (!n->is_Load() || late_load_ctrl != n_ctrl) {
          // Clone n once per use, walking the use list backwards since we
          // mutate it as we go.
          for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
            Node *u = n->last_out(j); // Clone private computation per use
            _igvn.rehash_node_delayed(u);
            Node *x = n->clone(); // Clone computation
            Node *x_ctrl = NULL;
            if( u->is_Phi() ) {
              // Replace all uses of normal nodes.  Replace Phi uses
              // individually, so the separate Nodes can sink down
              // different paths.
              uint k = 1;
              while( u->in(k) != n ) k++;
              u->set_req( k, x );
              // x goes next to Phi input path
              x_ctrl = u->in(0)->in(k);
              --j;
            } else {              // Normal use
              // Replace all uses
              for( uint k = 0; k < u->req(); k++ ) {
                if( u->in(k) == n ) {
                  u->set_req( k, x );
                  --j;
                }
              }
              x_ctrl = get_ctrl(u);
            }

            // Find control for 'x' next to use but not inside inner loops.
            // For inner loop uses get the preheader area.
            x_ctrl = place_near_use(x_ctrl);

            if (n->is_Load()) {
              // For loads, add a control edge to a CFG node outside of the loop
              // to force them to not combine and return back inside the loop
              // during GVN optimization (4641526).
              //
              // Because we are setting the actual control input, factor in
              // the result from get_late_ctrl() so we respect any
              // anti-dependences. (6233005).
              x_ctrl = dom_lca(late_load_ctrl, x_ctrl);

              // Don't allow the control input to be a CFG splitting node.
              // Such nodes should only have ProjNodes as outs, e.g. IfNode
              // should only have IfTrueNode and IfFalseNode (4985384).
              x_ctrl = find_non_split_ctrl(x_ctrl);

              IdealLoopTree* x_loop = get_loop(x_ctrl);
              Node* x_head = x_loop->_head;
              if (x_head->is_Loop() && (x_head->is_OuterStripMinedLoop() || x_head->as_Loop()->is_strip_mined()) && is_dominator(n_ctrl, x_head)) {
                // Anti dependence analysis is sometimes too
                // conservative: a store in the outer strip mined loop
                // can prevent a load from floating out of the outer
                // strip mined loop but the load may not be referenced
                // from the safepoint: loop strip mining verification
                // code reports a problem in that case. Make sure the
                // load is not moved in the outer strip mined loop in
                // that case.
                x_ctrl = x_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
              }
              assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");

              x->set_req(0, x_ctrl);
            }
            register_new_node(x, x_ctrl);

            // Some institutional knowledge is needed here: 'x' is
            // yanked because if the optimizer runs GVN on it all the
            // cloned x's will common up and undo this optimization and
            // be forced back in the loop.
            // I tried setting control edges on the x's to force them to
            // not combine, but the matching gets worried when it tries
            // to fold a StoreP and an AddP together (as part of an
            // address expression) and the AddP and StoreP have
            // different controls.
            if (!x->is_Load() && !x->is_DecodeNarrowPtr()) _igvn._worklist.yank(x);
          }
          _igvn.remove_dead_node(n);
        }
      }
    }
  }

  // A last-on-slice store with a loop invariant address may sink below
  // the loop.
  try_move_store_after_loop(n);

  // Check for Opaque2's who's loop has disappeared - who's input is in the
  // same loop nest as their output.  Remove 'em, they are no longer useful.
  if( n_op == Op_Opaque2 &&
      n->in(1) != NULL &&
      get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
    _igvn.replace_node( n, n->in(1) );
  }
}
1516
//------------------------------split_if_with_blocks---------------------------
// Check for aggressive application of 'split-if' optimization,
// using basic block level info.
// Iterative depth-first walk of all nodes reachable from root:
// split_if_with_blocks_pre() runs on the way down (pre-order) and
// split_if_with_blocks_post() on the way back up (post-order). 'visited'
// and 'nstack' are caller-supplied scratch structures.
void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) {
  Node* root = C->root();
  visited.set(root->_idx); // first, mark root as visited
  // Do pre-visit work for root
  Node* n = split_if_with_blocks_pre(root);
  uint cnt = n->outcnt();
  uint i = 0;

  while (true) {
    // Visit all children
    if (i < cnt) {
      Node* use = n->raw_out(i);
      ++i;
      if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
        // Now do pre-visit work for this use
        use = split_if_with_blocks_pre(use);
        nstack.push(n, i); // Save parent and next use's index.
        n = use; // Process all children of current use.
        cnt = use->outcnt();
        i = 0;
      }
    }
    else {
      // All of n's children have been processed, complete post-processing.
      if (cnt != 0 && !n->is_Con()) {
        assert(has_node(n), "no dead nodes");
        split_if_with_blocks_post(n);
      }
      if (must_throttle_split_if()) {
        // Node budget nearly exhausted: abandon the rest of the walk.
        nstack.clear();
      }
      if (nstack.is_empty()) {
        // Finished all nodes on stack.
        break;
      }
      // Get saved parent node and next use's index. Visit the rest of uses.
      n = nstack.node();
      cnt = n->outcnt();
      i = nstack.index();
      nstack.pop();
    }
  }
}
1563
1564
1565 //=============================================================================
1566 //
1567 // C L O N E A L O O P B O D Y
1568 //
1569
//------------------------------clone_iff--------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
// through the Phi recursively, and return a Bool (or an Opaque4 wrapping a
// Bool, when the Phi's inputs are Opaque4s).
Node* PhaseIdealLoop::clone_iff(PhiNode *phi, IdealLoopTree *loop) {

  // Convert this Phi into a Phi merging Bools
  uint i;
  for (i = 1; i < phi->req(); i++) {
    Node *b = phi->in(i);
    if (b->is_Phi()) {
      // Recursively flatten nested Phis first.
      _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi(), loop));
    } else {
      assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
    }
  }

  // Use the first input as the template for the cloned Bool/Cmp
  // (all inputs are assumed to have the same shape).
  Node* n = phi->in(1);
  Node* sample_opaque = NULL;
  Node *sample_bool = NULL;
  if (n->Opcode() == Op_Opaque4) {
    sample_opaque = n;
    sample_bool = n->in(1);
    assert(sample_bool->is_Bool(), "wrong type");
  } else {
    sample_bool = n;
  }
  Node *sample_cmp = sample_bool->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
  PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
  for (i = 1; i < phi->req(); i++) {
    // Dig the Cmp's two inputs out of each Bool (one level deeper when
    // wrapped in an Opaque4).
    Node *n1 = sample_opaque == NULL ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
    Node *n2 = sample_opaque == NULL ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
    phi1->set_req(i, n1);
    phi2->set_req(i, n2);
    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
  }
  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if (hit1) {                          // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1);     // Remove new phi
    assert(hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;            // Use existing phi
  } else {                            // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if (hit2) {                          // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2);     // Remove new phi
    assert(hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;            // Use existing phi
  } else {                            // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req(1, phi1);
  cmp->set_req(2, phi2);
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  // Make a new Bool
  Node *b = sample_bool->clone();
  b->set_req(1,cmp);
  _igvn.register_new_node_with_optimizer(b);
  set_ctrl(b, phi->in(0));

  if (sample_opaque != NULL) {
    // Preserve the Opaque4 wrapper around the new Bool.
    Node* opaque = sample_opaque->clone();
    opaque->set_req(1, b);
    _igvn.register_new_node_with_optimizer(opaque);
    set_ctrl(opaque, phi->in(0));
    return opaque;
  }

  assert(b->is_Bool(), "");
  return b;
}
1656
//------------------------------clone_bool-------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different.  Clone Cmp nodes
// through the Phi recursively, and return a new Cmp.
CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
  uint i;
  // Convert this Phi into a Phi merging Bools
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      // Recursively flatten nested Phis first.
      _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
    } else {
      assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
    }
  }

  // Use the first input as the template for the cloned Cmp.
  Node *sample_cmp = phi->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
  PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
  for( uint j = 1; j < phi->req(); j++ ) {
    Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
    Node *n1, *n2;
    if( cmp_top->is_Cmp() ) {
      n1 = cmp_top->in(1);
      n2 = cmp_top->in(2);
    } else {
      // TOP input: feed TOP through both merge Phis.
      n1 = n2 = cmp_top;
    }
    phi1->set_req( j, n1 );
    phi2->set_req( j, n2 );
    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
  }

  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if( hit1 ) {                          // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1);      // Remove new phi
    assert( hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;             // Use existing phi
  } else {                             // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if( hit2 ) {                          // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2);      // Remove new phi
    assert( hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;             // Use existing phi
  } else {                             // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req( 1, phi1 );
  cmp->set_req( 2, phi2 );
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  assert( cmp->is_Cmp(), "" );
  return (CmpNode*)cmp;
}
1725
1726 //------------------------------sink_use---------------------------------------
1727 // If 'use' was in the loop-exit block, it now needs to be sunk
1728 // below the post-loop merge point.
sink_use(Node * use,Node * post_loop)1729 void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
1730 if (!use->is_CFG() && get_ctrl(use) == post_loop->in(2)) {
1731 set_ctrl(use, post_loop);
1732 for (DUIterator j = use->outs(); use->has_out(j); j++)
1733 sink_use(use->out(j), post_loop);
1734 }
1735 }
1736
// Fix up data uses of node 'old' (just cloned as part of clone_loop) that
// lie outside of both the cloned loop and (for strip-mined loops) its outer
// loop.  Such a use saw a single loop-exit value before cloning; it must now
// merge the value from the old body with the one from the new body via a Phi
// on the corresponding exit-merge Region.  Uses that instead need whole-node
// cloning (If/CMove/predicate-Opaque, Bool, CreateEx) are pushed on the
// lazily-allocated split_if_set/split_bool_set/split_cex_set worklists,
// which the caller (clone_loop) processes afterwards.
void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
                                                 IdealLoopTree* loop, IdealLoopTree* outer_loop,
                                                 Node_List*& split_if_set, Node_List*& split_bool_set,
                                                 Node_List*& split_cex_set, Node_List& worklist,
                                                 uint new_counter, CloneLoopMode mode) {
  Node* nnn = old_new[old->_idx];
  // Copy uses to a worklist, so I can munge the def-use info
  // with impunity.
  for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
    worklist.push(old->fast_out(j));

  while( worklist.size() ) {
    Node *use = worklist.pop();
    if (!has_node(use)) continue; // Ignore dead nodes
    if (use->in(0) == C->top()) continue;
    IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
    // Check for data-use outside of loop - at least one of OLD or USE
    // must not be a CFG node.
#ifdef ASSERT
    if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == NULL) {
      Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
      assert(mode != IgnoreStripMined, "incorrect cloning mode");
      assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node");
    }
#endif
    if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {

      // If the Data use is an IF, that means we have an IF outside of the
      // loop that is switching on a condition that is set inside of the
      // loop.  Happens if people set a loop-exit flag; then test the flag
      // in the loop to break the loop, then test is again outside of the
      // loop to determine which way the loop exited.
      // Loop predicate If node connects to Bool node through Opaque1 node.
      if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use) || use->Opcode() == Op_Opaque4) {
        // Since this code is highly unlikely, we lazily build the worklist
        // of such Nodes to go split.
        if (!split_if_set) {
          ResourceArea *area = Thread::current()->resource_area();
          split_if_set = new Node_List(area);
        }
        split_if_set->push(use);
      }
      if (use->is_Bool()) {
        if (!split_bool_set) {
          ResourceArea *area = Thread::current()->resource_area();
          split_bool_set = new Node_List(area);
        }
        split_bool_set->push(use);
      }
      if (use->Opcode() == Op_CreateEx) {
        if (!split_cex_set) {
          ResourceArea *area = Thread::current()->resource_area();
          split_cex_set = new Node_List(area);
        }
        split_cex_set->push(use);
      }


      // Get "block" use is in
      uint idx = 0;
      while( use->in(idx) != old ) idx++;
      Node *prev = use->is_CFG() ? use : get_ctrl(use);
      assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
      // Nodes with _idx >= new_counter were created by this pass (exit-merge
      // Regions): their loop-exit predecessor is input 2.  For pre-existing
      // nodes, walk the dominator tree instead.
      Node *cfg = prev->_idx >= new_counter
        ? prev->in(2)
        : idom(prev);
      if( use->is_Phi() )     // Phi use is in prior block
        cfg = prev->in(idx);  // NOT in block of Phi itself
      if (cfg->is_top()) {    // Use is dead?
        _igvn.replace_input_of(use, idx, C->top());
        continue;
      }

      // If use is referenced through control edge... (idx == 0)
      if (mode == IgnoreStripMined && idx == 0) {
        LoopNode *head = loop->_head->as_Loop();
        if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
          // That node is outside the inner loop, leave it outside the
          // outer loop as well to not confuse verification code.
          assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
          _igvn.replace_input_of(use, 0, head->outer_loop_exit());
          continue;
        }
      }

      // Walk up until 'cfg' is back inside the loop nest being cloned.
      while(!outer_loop->is_member(get_loop(cfg))) {
        prev = cfg;
        cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
      }
      // If the use occurs after merging several exits from the loop, then
      // old value must have dominated all those exits.  Since the same old
      // value was used on all those exits we did not need a Phi at this
      // merge point.  NOW we do need a Phi here.  Each loop exit value
      // is now merged with the peeled body exit; each exit gets its own
      // private Phi and those Phis need to be merged here.
      Node *phi;
      if( prev->is_Region() ) {
        if( idx == 0 ) {      // Updating control edge?
          phi = prev;         // Just use existing control
        } else {              // Else need a new Phi
          phi = PhiNode::make( prev, old );
          // Now recursively fix up the new uses of old!
          for( uint i = 1; i < prev->req(); i++ ) {
            worklist.push(phi); // Onto worklist once for each 'old' input
          }
        }
      } else {
        // Get new RegionNode merging old and new loop exits
        prev = old_new[prev->_idx];
        assert( prev, "just made this in step 7" );
        if( idx == 0) {      // Updating control edge?
          phi = prev;        // Just use existing control
        } else {             // Else need a new Phi
          // Make a new Phi merging data values properly
          phi = PhiNode::make( prev, old );
          phi->set_req( 1, nnn );
        }
      }
      // If inserting a new Phi, check for prior hits
      if( idx != 0 ) {
        Node *hit = _igvn.hash_find_insert(phi);
        if( hit == NULL ) {
          _igvn.register_new_node_with_optimizer(phi); // Register new phi
        } else {                                      // or
          // Remove the new phi from the graph and use the hit
          _igvn.remove_dead_node(phi);
          phi = hit;                                  // Use existing phi
        }
        set_ctrl(phi, prev);
      }
      // Make 'use' use the Phi instead of the old loop body exit value
      _igvn.replace_input_of(use, idx, phi);
      if( use->_idx >= new_counter ) { // If updating new phis
        // Not needed for correctness, but prevents a weak assert
        // in AddPNode from tripping (when we end up with different
        // base & derived Phis that will become the same after
        // IGVN does CSE).
        Node *hit = _igvn.hash_find_insert(use);
        if( hit )      // Go ahead and re-hash for hits.
          _igvn.replace_node( use, hit );
      }

      // If 'use' was in the loop-exit block, it now needs to be sunk
      // below the post-loop merge point.
      sink_use( use, prev );
    }
  }
}
1885
clone_outer_loop_helper(Node * n,const IdealLoopTree * loop,const IdealLoopTree * outer_loop,const Node_List & old_new,Unique_Node_List & wq,PhaseIdealLoop * phase,bool check_old_new)1886 static void clone_outer_loop_helper(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
1887 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
1888 bool check_old_new) {
1889 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1890 Node* u = n->fast_out(j);
1891 assert(check_old_new || old_new[u->_idx] == NULL, "shouldn't have been cloned");
1892 if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == NULL)) {
1893 Node* c = phase->get_ctrl(u);
1894 IdealLoopTree* u_loop = phase->get_loop(c);
1895 assert(!loop->is_member(u_loop), "can be in outer loop or out of both loops only");
1896 if (outer_loop->is_member(u_loop)) {
1897 wq.push(u);
1898 }
1899 }
1900 }
1901 }
1902
// After the inner loop body has been cloned by clone_loop, handle the
// strip-mined outer loop: either clone its skeleton (OuterStripMinedLoop,
// tail, loop-end If, safepoint) around the new inner loop
// (CloneIncludesStripMined), or strip the outer loop off the clone.  Data
// nodes kept in the outer loop by the safepoint are cloned too and recorded
// in 'extra_data_nodes'.
void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
                                      IdealLoopTree* outer_loop, int dd, Node_List &old_new,
                                      Node_List& extra_data_nodes) {
  if (head->is_strip_mined() && mode != IgnoreStripMined) {
    CountedLoopNode* cl = head->as_CountedLoop();
    Node* l = cl->outer_loop();
    Node* tail = cl->outer_loop_tail();
    IfNode* le = cl->outer_loop_end();
    Node* sfpt = cl->outer_safepoint();
    CountedLoopEndNode* cle = cl->loopexit();
    CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
    CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
    Node* cle_out = cle->proj_out(false);

    Node* new_sfpt = NULL;
    Node* new_cle_out = cle_out->clone();
    old_new.map(cle_out->_idx, new_cle_out);
    if (mode == CloneIncludesStripMined) {
      // clone outer loop body
      Node* new_l = l->clone();
      Node* new_tail = tail->clone();
      IfNode* new_le = le->clone()->as_If();
      new_sfpt = sfpt->clone();

      // The cloned outer-loop skeleton lives in the parent of the outer loop.
      set_loop(new_l, outer_loop->_parent);
      set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
      set_loop(new_cle_out, outer_loop->_parent);
      set_idom(new_cle_out, new_cle, dd);
      set_loop(new_sfpt, outer_loop->_parent);
      set_idom(new_sfpt, new_cle_out, dd);
      set_loop(new_le, outer_loop->_parent);
      set_idom(new_le, new_sfpt, dd);
      set_loop(new_tail, outer_loop->_parent);
      set_idom(new_tail, new_le, dd);
      set_idom(new_cl, new_l, dd);

      old_new.map(l->_idx, new_l);
      old_new.map(tail->_idx, new_tail);
      old_new.map(le->_idx, new_le);
      old_new.map(sfpt->_idx, new_sfpt);

      // Wire the skeleton together:
      // new_l -> new_cl ... new_cle -> new_cle_out -> new_sfpt -> new_le -> new_tail -> new_l
      new_l->set_req(LoopNode::LoopBackControl, new_tail);
      new_l->set_req(0, new_l);
      new_tail->set_req(0, new_le);
      new_le->set_req(0, new_sfpt);
      new_sfpt->set_req(0, new_cle_out);
      new_cle_out->set_req(0, new_cle);
      new_cl->set_req(LoopNode::EntryControl, new_l);

      _igvn.register_new_node_with_optimizer(new_l);
      _igvn.register_new_node_with_optimizer(new_tail);
      _igvn.register_new_node_with_optimizer(new_le);
    } else {
      // The clone does not keep a strip-mined outer loop: clear the flag on
      // the cloned head and attach it directly to the control above the old
      // outer loop head.
      Node *newhead = old_new[loop->_head->_idx];
      newhead->as_Loop()->clear_strip_mined();
      _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
      set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
    }
    // Look at data node that were assigned a control in the outer
    // loop: they are kept in the outer loop by the safepoint so start
    // from the safepoint node's inputs.
    // NOTE: this local deliberately shadows the 'outer_loop' parameter with
    // the loop tree of the outer strip-mined loop head.
    IdealLoopTree* outer_loop = get_loop(l);
    Node_Stack stack(2);
    stack.push(sfpt, 1);
    uint new_counter = C->unique();
    while (stack.size() > 0) {
      Node* n = stack.node();
      uint i = stack.index();
      // Advance to the next input that is a not-yet-cloned data node
      // controlled by the outer loop (depth-first traversal).
      while (i < n->req() &&
             (n->in(i) == NULL ||
              !has_ctrl(n->in(i)) ||
              get_loop(get_ctrl(n->in(i))) != outer_loop ||
              (old_new[n->in(i)->_idx] != NULL && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
        i++;
      }
      if (i < n->req()) {
        stack.set_index(i+1);
        stack.push(n->in(i), 0);
      } else {
        // All inputs processed: clone this node (post-order).
        assert(old_new[n->_idx] == NULL || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
        Node* m = n == sfpt ? new_sfpt : n->clone();
        if (m != NULL) {
          for (uint i = 0; i < n->req(); i++) {
            if (m->in(i) != NULL && old_new[m->in(i)->_idx] != NULL) {
              m->set_req(i, old_new[m->in(i)->_idx]);
            }
          }
        } else {
          assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
        }
        if (n != sfpt) {
          extra_data_nodes.push(n);
          _igvn.register_new_node_with_optimizer(m);
          assert(get_ctrl(n) == cle_out, "what other control?");
          set_ctrl(m, new_cle_out);
          old_new.map(n->_idx, m);
        }
        stack.pop();
      }
    }
    if (mode == CloneIncludesStripMined) {
      _igvn.register_new_node_with_optimizer(new_sfpt);
      _igvn.register_new_node_with_optimizer(new_cle_out);
    }
    // Some other transformation may have pessimistically assigned some
    // data nodes to the outer loop.  Set their control so they are out
    // of the outer loop.
    ResourceMark rm;
    Unique_Node_List wq;
    for (uint i = 0; i < extra_data_nodes.size(); i++) {
      Node* old = extra_data_nodes.at(i);
      clone_outer_loop_helper(old, loop, outer_loop, old_new, wq, this, true);
    }
    Node* new_ctrl = cl->outer_loop_exit();
    assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
    for (uint i = 0; i < wq.size(); i++) {
      Node* n = wq.at(i);
      set_ctrl(n, new_ctrl);
      // Transitively move the uses of 'n' out of the outer loop as well.
      clone_outer_loop_helper(n, loop, outer_loop, old_new, wq, this, false);
    }
  } else {
    // Not strip-mined (or strip mining is ignored): only the dominator info
    // of the cloned head needs fixing.
    Node *newhead = old_new[loop->_head->_idx];
    set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
  }
}
2028
2029 //------------------------------clone_loop-------------------------------------
2030 //
2031 // C L O N E A L O O P B O D Y
2032 //
2033 // This is the basic building block of the loop optimizations. It clones an
2034 // entire loop body. It makes an old_new loop body mapping; with this mapping
2035 // you can find the new-loop equivalent to an old-loop node. All new-loop
2036 // nodes are exactly equal to their old-loop counterparts, all edges are the
2037 // same. All exits from the old-loop now have a RegionNode that merges the
2038 // equivalent new-loop path. This is true even for the normal "loop-exit"
2039 // condition. All uses of loop-invariant old-loop values now come from (one
2040 // or more) Phis that merge their new-loop equivalents.
2041 //
2042 // This operation leaves the graph in an illegal state: there are two valid
2043 // control edges coming from the loop pre-header to both loop bodies. I'll
2044 // definitely have to hack the graph after running this transform.
2045 //
2046 // From this building block I will further edit edges to perform loop peeling
2047 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
2048 //
//  Parameter side_by_side_idom:
//    When side_by_side_idom is NULL, the dominator tree is constructed for
2051 // the clone loop to dominate the original. Used in construction of
2052 // pre-main-post loop sequence.
2053 // When nonnull, the clone and original are side-by-side, both are
2054 // dominated by the side_by_side_idom node. Used in construction of
2055 // unswitched loops.
// Clone the entire body of 'loop', filling in the old->new node mapping in
// 'old_new'.  See the block comment above for the overall contract.  'dd' is
// the dominator depth for the cloned nodes; 'mode' controls how a strip-mined
// outer loop is treated; 'side_by_side_idom', when non-NULL, becomes the
// common idom of the original and cloned loop exits.
void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
                                 CloneLoopMode mode, Node* side_by_side_idom) {

  LoopNode* head = loop->_head->as_Loop();
  head->verify_strip_mined(1);

  if (C->do_vector_loop() && PrintOpto) {
    const char* mname = C->method()->name()->as_quoted_ascii();
    if (mname != NULL) {
      tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
    }
  }

  CloneMap& cm = C->clone_map();
  Dict* dict = cm.dict();
  if (C->do_vector_loop()) {
    // Track clone generations so SuperWord can relate old and new nodes.
    cm.set_clone_idx(cm.max_gen()+1);
#ifndef PRODUCT
    if (PrintOpto) {
      tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
      loop->dump_head();
    }
#endif
  }

  // Step 1: Clone the loop body.  Make the old->new mapping.
  uint i;
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node *old = loop->_body.at(i);
    Node *nnn = old->clone();
    old_new.map( old->_idx, nnn );
    if (C->do_vector_loop()) {
      cm.verify_insert_and_clone(old, nnn, cm.clone_idx());
    }
    _igvn.register_new_node_with_optimizer(nnn);
  }

  IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;

  // Step 2: Fix the edges in the new body.  If the old input is outside the
  // loop use it.  If the old input is INside the loop, use the corresponding
  // new node instead.
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node *old = loop->_body.at(i);
    Node *nnn = old_new[old->_idx];
    // Fix CFG/Loop controlling the new node
    if (has_ctrl(old)) {
      set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
    } else {
      set_loop(nnn, outer_loop->_parent);
      if (old->outcnt() > 0) {
        set_idom( nnn, old_new[idom(old)->_idx], dd );
      }
    }
    // Correct edges to the new node
    for( uint j = 0; j < nnn->req(); j++ ) {
        Node *n = nnn->in(j);
        if( n ) {
          IdealLoopTree *old_in_loop = get_loop( has_ctrl(n) ? get_ctrl(n) : n );
          if( loop->is_member( old_in_loop ) )
            nnn->set_req(j, old_new[n->_idx]);
        }
    }
    _igvn.hash_find_insert(nnn);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List extra_data_nodes(area); // data nodes in the outer strip mined loop
  clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);

  // Step 3: Now fix control uses.  Loop varying control uses have already
  // been fixed up (as part of all input edges in Step 2).  Loop invariant
  // control uses must be either an IfFalse or an IfTrue.  Make a merge
  // point to merge the old and new IfFalse/IfTrue nodes; make the use
  // refer to this.
  Node_List worklist(area);
  uint new_counter = C->unique();
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node* old = loop->_body.at(i);
    if( !old->is_CFG() ) continue;

    // Copy uses to a worklist, so I can munge the def-use info
    // with impunity.
    for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
      worklist.push(old->fast_out(j));

    while( worklist.size() ) {  // Visit all uses
      Node *use = worklist.pop();
      if (!has_node(use)) continue; // Ignore dead nodes
      IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
      if( !loop->is_member( use_loop ) && use->is_CFG() ) {
        // Both OLD and USE are CFG nodes here.
        assert( use->is_Proj(), "" );
        Node* nnn = old_new[old->_idx];

        Node* newuse = NULL;
        if (head->is_strip_mined() && mode != IgnoreStripMined) {
          // For strip-mined loops, redirect the exit through the outer
          // loop end rather than the inner counted-loop end.
          CountedLoopNode* cl = head->as_CountedLoop();
          CountedLoopEndNode* cle = cl->loopexit();
          Node* cle_out = cle->proj_out_or_null(false);
          if (use == cle_out) {
            IfNode* le = cl->outer_loop_end();
            use = le->proj_out(false);
            use_loop = get_loop(use);
            if (mode == CloneIncludesStripMined) {
              nnn = old_new[le->_idx];
            } else {
              newuse = old_new[cle_out->_idx];
            }
          }
        }
        if (newuse == NULL) {
          newuse = use->clone();
        }

        // Clone the loop exit control projection
        if (C->do_vector_loop()) {
          cm.verify_insert_and_clone(use, newuse, cm.clone_idx());
        }
        newuse->set_req(0,nnn);
        _igvn.register_new_node_with_optimizer(newuse);
        set_loop(newuse, use_loop);
        set_idom(newuse, nnn, dom_depth(nnn) + 1 );

        // We need a Region to merge the exit from the peeled body and the
        // exit from the old loop body.
        RegionNode *r = new RegionNode(3);
        // Map the old use to the new merge point
        old_new.map( use->_idx, r );
        uint dd_r = MIN2(dom_depth(newuse),dom_depth(use));
        assert( dd_r >= dom_depth(dom_lca(newuse,use)), "" );

        // The original user of 'use' uses 'r' instead.
        for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
          Node* useuse = use->last_out(l);
          _igvn.rehash_node_delayed(useuse);
          uint uses_found = 0;
          if( useuse->in(0) == use ) {
            useuse->set_req(0, r);
            uses_found++;
            if( useuse->is_CFG() ) {
              // This is not a dom_depth > dd_r because when new
              // control flow is constructed by a loop opt, a node and
              // its dominator can end up at the same dom_depth
              assert(dom_depth(useuse) >= dd_r, "");
              set_idom(useuse, r, dom_depth(useuse));
            }
          }
          for( uint k = 1; k < useuse->req(); k++ ) {
            if( useuse->in(k) == use ) {
              useuse->set_req(k, r);
              uses_found++;
              if (useuse->is_Loop() && k == LoopNode::EntryControl) {
                // This is not a dom_depth > dd_r because when new
                // control flow is constructed by a loop opt, a node
                // and its dominator can end up at the same dom_depth
                assert(dom_depth(useuse) >= dd_r , "");
                set_idom(useuse, r, dom_depth(useuse));
              }
            }
          }
          l -= uses_found;    // we deleted 1 or more copies of this edge
        }

        // Now finish up 'r'
        r->set_req( 1, newuse );
        r->set_req( 2,    use );
        _igvn.register_new_node_with_optimizer(r);
        set_loop(r, use_loop);
        set_idom(r, !side_by_side_idom ? newuse->in(0) : side_by_side_idom, dd_r);
      } // End of if a loop-exit test
    }
  }

  // Step 4: If loop-invariant use is not control, it must be dominated by a
  // loop exit IfFalse/IfTrue.  Find "proper" loop exit.  Make a Region
  // there if needed.  Make a Phi there merging old and new used values.
  Node_List *split_if_set = NULL;
  Node_List *split_bool_set = NULL;
  Node_List *split_cex_set = NULL;
  for( i = 0; i < loop->_body.size(); i++ ) {
    Node* old = loop->_body.at(i);
    clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
                                split_bool_set, split_cex_set, worklist, new_counter,
                                mode);
  }

  for (i = 0; i < extra_data_nodes.size(); i++) {
    Node* old = extra_data_nodes.at(i);
    clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
                                split_bool_set, split_cex_set, worklist, new_counter,
                                mode);
  }

  // Check for IFs that need splitting/cloning.  Happens if an IF outside of
  // the loop uses a condition set in the loop.  The original IF probably
  // takes control from one or more OLD Regions (which in turn get from NEW
  // Regions).  In any case, there will be a set of Phis for each merge point
  // from the IF up to where the original BOOL def exits the loop.
  if (split_if_set) {
    while (split_if_set->size()) {
      Node *iff = split_if_set->pop();
      if (iff->in(1)->is_Phi()) {
        Node *b = clone_iff(iff->in(1)->as_Phi(), loop);
        _igvn.replace_input_of(iff, 1, b);
      }
    }
  }
  if (split_bool_set) {
    while (split_bool_set->size()) {
      Node *b = split_bool_set->pop();
      Node *phi = b->in(1);
      assert(phi->is_Phi(), "");
      CmpNode *cmp = clone_bool((PhiNode*)phi, loop);
      _igvn.replace_input_of(b, 1, cmp);
    }
  }
  if (split_cex_set) {
    while (split_cex_set->size()) {
      Node *b = split_cex_set->pop();
      assert(b->in(0)->is_Region(), "");
      assert(b->in(1)->is_Phi(), "");
      assert(b->in(0)->in(0) == b->in(1)->in(0), "");
      split_up(b, b->in(0), NULL);
    }
  }

}
2284
2285
2286 //---------------------- stride_of_possible_iv -------------------------------------
2287 // Looks for an iff/bool/comp with one operand of the compare
2288 // being a cycle involving an add and a phi,
2289 // with an optional truncation (left-shift followed by a right-shift)
2290 // of the add. Returns zero if not an iv.
// Looks for an iff/bool/comp with one operand of the compare being a cycle
// involving an add and a phi, with an optional truncation (left-shift
// followed by a right-shift) of the add.  Returns the constant stride of
// that candidate induction variable, or zero if the shape doesn't match or
// the stride is not a compile-time constant.
int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
  Node* trunc1 = NULL;
  Node* trunc2 = NULL;
  const TypeInt* ttype = NULL;
  // Must be an If over a Bool to even be a candidate.
  if (!iff->is_If() || iff->in(1) == NULL || !iff->in(1)->is_Bool()) {
    return 0;
  }
  BoolNode* bl = iff->in(1)->as_Bool();
  Node* cmp = bl->in(1);
  if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
    return 0;
  }
  // Must have an invariant operand
  if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) {
    return 0;
  }
  Node* add2 = NULL;
  Node* cmp1 = cmp->in(1);
  if (cmp1->is_Phi()) {
    // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
    // The compare is on the phi itself: look for a back-edge input that is
    // an increment of this very phi.
    Node* phi = cmp1;
    for (uint i = 1; i < phi->req(); i++) {
      Node* in = phi->in(i);
      Node* add = CountedLoopNode::match_incr_with_optional_truncation(in,
                                &trunc1, &trunc2, &ttype);
      if (add && add->in(1) == phi) {
        add2 = add->in(2);
        break;
      }
    }
  } else {
    // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
    // The compare is on the (possibly truncated) increment: check that the
    // phi it adds to has the increment as one of its inputs (the cycle).
    Node* addtrunc = cmp1;
    Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc,
                                &trunc1, &trunc2, &ttype);
    if (add && add->in(1)->is_Phi()) {
      Node* phi = add->in(1);
      for (uint i = 1; i < phi->req(); i++) {
        if (phi->in(i) == addtrunc) {
          add2 = add->in(2);
          break;
        }
      }
    }
  }
  // Only a constant increment qualifies as a stride.
  if (add2 != NULL) {
    const TypeInt* add2t = _igvn.type(add2)->is_int();
    if (add2t->is_con()) {
      return add2t->get_con();
    }
  }
  return 0;
}
2344
2345
2346 //---------------------- stay_in_loop -------------------------------------
2347 // Return the (unique) control output node that's in the loop (if it exists.)
stay_in_loop(Node * n,IdealLoopTree * loop)2348 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) {
2349 Node* unique = NULL;
2350 if (!n) return NULL;
2351 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2352 Node* use = n->fast_out(i);
2353 if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
2354 if (unique != NULL) {
2355 return NULL;
2356 }
2357 unique = use;
2358 }
2359 }
2360 return unique;
2361 }
2362
2363 //------------------------------ register_node -------------------------------------
2364 // Utility to register node "n" with PhaseIdealLoop
register_node(Node * n,IdealLoopTree * loop,Node * pred,int ddepth)2365 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth) {
2366 _igvn.register_new_node_with_optimizer(n);
2367 loop->_body.push(n);
2368 if (n->is_CFG()) {
2369 set_loop(n, loop);
2370 set_idom(n, pred, ddepth);
2371 } else {
2372 set_ctrl(n, pred);
2373 }
2374 }
2375
2376 //------------------------------ proj_clone -------------------------------------
2377 // Utility to create an if-projection
proj_clone(ProjNode * p,IfNode * iff)2378 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
2379 ProjNode* c = p->clone()->as_Proj();
2380 c->set_req(0, iff);
2381 return c;
2382 }
2383
2384 //------------------------------ short_circuit_if -------------------------------------
2385 // Force the iff control output to be the live_proj
short_circuit_if(IfNode * iff,ProjNode * live_proj)2386 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
2387 guarantee(live_proj != NULL, "null projection");
2388 int proj_con = live_proj->_con;
2389 assert(proj_con == 0 || proj_con == 1, "false or true projection");
2390 Node *con = _igvn.intcon(proj_con);
2391 set_ctrl(con, C->root());
2392 if (iff) {
2393 iff->set_req(1, con);
2394 }
2395 return con;
2396 }
2397
2398 //------------------------------ insert_if_before_proj -------------------------------------
2399 // Insert a new if before an if projection (* - new node)
2400 //
2401 // before
2402 // if(test)
2403 // / \
2404 // v v
2405 // other-proj proj (arg)
2406 //
2407 // after
2408 // if(test)
2409 // / \
2410 // / v
2411 // | * proj-clone
2412 // v |
2413 // other-proj v
2414 // * new_if(relop(cmp[IU](left,right)))
2415 // / \
2416 // v v
2417 // * new-proj proj
2418 // (returned)
2419 //
// Insert a new if (testing relop(left, right) with a signed or unsigned
// compare, per 'Signed') before the if-projection 'proj', per the diagram
// above.  'proj' keeps its meaning but now hangs off the new If; the
// returned projection is the new exit path.
ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
  IfNode* iff = proj->in(0)->as_If();
  IdealLoopTree *loop = get_loop(proj);
  ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
  int ddepth = dom_depth(proj);

  // Both nodes get new inputs below; tell IGVN before mutating them.
  _igvn.rehash_node_delayed(iff);
  _igvn.rehash_node_delayed(proj);

  proj->set_req(0, NULL);  // temporary disconnect
  ProjNode* proj2 = proj_clone(proj, iff);
  register_node(proj2, loop, iff, ddepth);

  Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
  register_node(cmp, loop, proj2, ddepth);

  BoolNode* bol = new BoolNode(cmp, relop);
  register_node(bol, loop, proj2, ddepth);

  // The new If must be the same kind as the original (plain If or
  // RangeCheck) so downstream pattern matching still works.
  int opcode = iff->Opcode();
  assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
  IfNode* new_if = (opcode == Op_If) ? new IfNode(proj2, bol, iff->_prob, iff->_fcnt):
    new RangeCheckNode(proj2, bol, iff->_prob, iff->_fcnt);
  register_node(new_if, loop, proj2, ddepth);

  proj->set_req(0, new_if); // reattach
  set_idom(proj, new_if, ddepth);

  ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
  guarantee(new_exit != NULL, "null exit node");
  register_node(new_exit, get_loop(other_proj), new_if, ddepth);

  return new_exit;
}
2454
2455 //------------------------------ insert_region_before_proj -------------------------------------
2456 // Insert a region before an if projection (* - new node)
2457 //
2458 // before
2459 // if(test)
2460 // / |
2461 // v |
2462 // proj v
2463 // other-proj
2464 //
2465 // after
2466 // if(test)
2467 // / |
2468 // v |
2469 // * proj-clone v
2470 // | other-proj
2471 // v
2472 // * new-region
2473 // |
2474 // v
2475 // * dum_if
2476 // / \
2477 // v \
2478 // * dum-proj v
2479 // proj
2480 //
// Insert a region before the if-projection 'proj', per the diagram above.
// The dummy If fed by a constant condition keeps 'proj' alive on its
// original path while allowing additional predecessors to be added to the
// returned Region later.
RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
  IfNode* iff = proj->in(0)->as_If();
  IdealLoopTree *loop = get_loop(proj);
  ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
  int ddepth = dom_depth(proj);

  // Both nodes get new inputs below; tell IGVN before mutating them.
  _igvn.rehash_node_delayed(iff);
  _igvn.rehash_node_delayed(proj);

  proj->set_req(0, NULL);  // temporary disconnect
  ProjNode* proj2 = proj_clone(proj, iff);
  register_node(proj2, loop, iff, ddepth);

  RegionNode* reg = new RegionNode(2);
  reg->set_req(1, proj2);
  register_node(reg, loop, iff, ddepth);

  // short_circuit_if(NULL, proj) just produces the constant that keeps
  // the dum_if always taking proj's path.
  IfNode* dum_if = new IfNode(reg, short_circuit_if(NULL, proj), iff->_prob, iff->_fcnt);
  register_node(dum_if, loop, reg, ddepth);

  proj->set_req(0, dum_if); // reattach
  set_idom(proj, dum_if, ddepth);

  ProjNode* dum_proj = proj_clone(other_proj, dum_if);
  register_node(dum_proj, loop, dum_if, ddepth);

  return reg;
}
2509
2510 //------------------------------ insert_cmpi_loop_exit -------------------------------------
2511 // Clone a signed compare loop exit from an unsigned compare and
2512 // insert it before the unsigned cmp on the stay-in-loop path.
2513 // All new nodes inserted in the dominator tree between the original
2514 // if and it's projections. The original if test is replaced with
2515 // a constant to force the stay-in-loop path.
2516 //
2517 // This is done to make sure that the original if and it's projections
2518 // still dominate the same set of control nodes, that the ctrl() relation
2519 // from data nodes to them is preserved, and that their loop nesting is
2520 // preserved.
2521 //
2522 // before
2523 // if(i <u limit) unsigned compare loop exit
2524 // / |
2525 // v v
2526 // exit-proj stay-in-loop-proj
2527 //
2528 // after
2529 // if(stay-in-loop-const) original if
2530 // / |
2531 // / v
2532 // / if(i < limit) new signed test
2533 // / / |
2534 // / / v
2535 // / / if(i <u limit) new cloned unsigned test
2536 // / / / |
2537 // v v v |
2538 // region |
2539 // | |
2540 // dum-if |
2541 // / | |
2542 // ether | |
2543 // v v
2544 // exit-proj stay-in-loop-proj
2545 //
// Clone a signed compare loop exit from an unsigned compare and insert it
// before the unsigned cmp on the stay-in-loop path (see diagram above).
// Returns the new signed-compare If, or NULL if the loop-exit shape does
// not match (not a <u test, or no recognizable iv stride).
IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) {
  const bool Signed   = true;
  const bool Unsigned = false;

  BoolNode* bol = if_cmpu->in(1)->as_Bool();
  if (bol->_test._test != BoolTest::lt) return NULL;
  CmpNode* cmpu = bol->in(1)->as_Cmp();
  if (cmpu->Opcode() != Op_CmpU) return NULL;
  int stride = stride_of_possible_iv(if_cmpu);
  if (stride == 0) return NULL;

  Node* lp_proj = stay_in_loop(if_cmpu, loop);
  guarantee(lp_proj != NULL, "null loop node");

  ProjNode* lp_continue = lp_proj->as_Proj();
  ProjNode* lp_exit     = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();

  // For a down-counting iv the unsigned test i <u limit exits when i goes
  // below zero, so the signed test compares against zero instead.
  Node* limit = NULL;
  if (stride > 0) {
    limit = cmpu->in(2);
  } else {
    limit = _igvn.makecon(TypeInt::ZERO);
    set_ctrl(limit, C->root());
  }
  // Create a new region on the exit path
  RegionNode* reg = insert_region_before_proj(lp_exit);
  guarantee(reg != NULL, "null region node");

  // Clone the if-cmpu-true-false using a signed compare
  BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
  ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue);
  reg->add_req(cmpi_exit);

  // Clone the if-cmpu-true-false
  BoolTest::mask rel_u = bol->_test._test;
  ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
  reg->add_req(cmpu_exit);

  // Force original if to stay in loop.
  short_circuit_if(if_cmpu, lp_continue);

  return cmpi_exit->in(0)->as_If();
}
2589
2590 //------------------------------ remove_cmpi_loop_exit -------------------------------------
2591 // Remove a previously inserted signed compare loop exit.
remove_cmpi_loop_exit(IfNode * if_cmp,IdealLoopTree * loop)2592 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
2593 Node* lp_proj = stay_in_loop(if_cmp, loop);
2594 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
2595 stay_in_loop(lp_proj, loop)->is_If() &&
2596 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
2597 Node *con = _igvn.makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
2598 set_ctrl(con, C->root());
2599 if_cmp->set_req(1, con);
2600 }
2601
2602 //------------------------------ scheduled_nodelist -------------------------------------
2603 // Create a post order schedule of nodes that are in the
2604 // "member" set. The list is returned in "sched".
2605 // The first node in "sched" is the loop head, followed by
2606 // nodes which have no inputs in the "member" set, and then
2607 // followed by the nodes that have an immediate input dependence
2608 // on a node in "sched".
void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) {

  assert(member.test(loop->_head->_idx), "loop head must be in member set");
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  Node_Stack nstack(a, loop->_body.size());

  Node* n = loop->_head;  // top of stack is cached in "n"
  uint idx = 0;
  visited.set(n->_idx);

  // Initially push all with no inputs from within member set
  for(uint i = 0; i < loop->_body.size(); i++ ) {
    Node *elt = loop->_body.at(i);
    if (member.test(elt->_idx)) {
      bool found = false;
      for (uint j = 0; j < elt->req(); j++) {
        Node* def = elt->in(j);
        // Self-edges (def == elt) do not count as an in-member input.
        if (def && member.test(def->_idx) && def != elt) {
          found = true;
          break;
        }
      }
      if (!found && elt != loop->_head) {
        // Seed the DFS: stash the current "n" and make "elt" the new top.
        nstack.push(n, idx);
        n = elt;
        assert(!visited.test(n->_idx), "not seen yet");
        visited.set(n->_idx);
      }
    }
  }

  // traverse out's that are in the member set
  // Iterative DFS over use (out) edges; a node is appended to "sched"
  // only after all of its not-yet-visited member uses have been explored.
  while (true) {
    if (idx < n->outcnt()) {
      Node* use = n->raw_out(idx);
      idx++;
      if (!visited.test_set(use->_idx)) {
        if (member.test(use->_idx)) {
          // Descend into an unvisited member use.
          nstack.push(n, idx);
          n = use;
          idx = 0;
        }
      }
    } else {
      // All outputs processed
      sched.push(n);
      if (nstack.is_empty()) break;
      // Resume the parent node where we left off.
      n = nstack.node();
      idx = nstack.index();
      nstack.pop();
    }
  }
}
2663
2664
2665 //------------------------------ has_use_in_set -------------------------------------
2666 // Has a use in the vector set
has_use_in_set(Node * n,VectorSet & vset)2667 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) {
2668 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2669 Node* use = n->fast_out(j);
2670 if (vset.test(use->_idx)) {
2671 return true;
2672 }
2673 }
2674 return false;
2675 }
2676
2677
2678 //------------------------------ has_use_internal_to_set -------------------------------------
2679 // Has use internal to the vector set (ie. not in a phi at the loop head)
has_use_internal_to_set(Node * n,VectorSet & vset,IdealLoopTree * loop)2680 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) {
2681 Node* head = loop->_head;
2682 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2683 Node* use = n->fast_out(j);
2684 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
2685 return true;
2686 }
2687 }
2688 return false;
2689 }
2690
2691
2692 //------------------------------ clone_for_use_outside_loop -------------------------------------
2693 // clone "n" for uses that are outside of loop
int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
  int cloned = 0;
  assert(worklist.size() == 0, "should be empty");
  // Collect every user of "n" whose control lies outside "loop".
  for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
    Node* use = n->fast_out(j);
    if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) {
      worklist.push(use);
    }
  }
  while( worklist.size() ) {
    Node *use = worklist.pop();
    // Skip uses that have since died or been disconnected from control.
    if (!has_node(use) || use->in(0) == C->top()) continue;
    // Find one input edge of "use" that refers to "n" (each worklist
    // entry rewrites exactly one such edge).
    uint j;
    for (j = 0; j < use->req(); j++) {
      if (use->in(j) == n) break;
    }
    assert(j < use->req(), "must be there");

    // clone "n" and insert it between the inputs of "n" and the use outside the loop
    Node* n_clone = n->clone();
    _igvn.replace_input_of(use, j, n_clone);
    cloned++;
    Node* use_c;
    if (!use->is_Phi()) {
      use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
    } else {
      // Use in a phi is considered a use in the associated predecessor block
      use_c = use->in(0)->in(j);
    }
    // Pin the clone at the (out-of-loop) control of its use and register
    // it with the loop tree and IGVN.
    set_ctrl(n_clone, use_c);
    assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
    get_loop(use_c)->_body.push(n_clone);
    _igvn.register_new_node_with_optimizer(n_clone);
#ifndef PRODUCT
    if (TracePartialPeeling) {
      tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
    }
#endif
  }
  // Number of clones created, used by the caller for node-budget accounting.
  return cloned;
}
2735
2736
2737 //------------------------------ clone_for_special_use_inside_loop -------------------------------------
2738 // clone "n" for special uses that are in the not_peeled region.
2739 // If these def-uses occur in separate blocks, the code generator
2740 // marks the method as not compilable. For example, if a "BoolNode"
2741 // is in a different basic block than the "IfNode" that uses it, then
2742 // the compilation is aborted in the code generator.
void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                                        VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) {
  // Phis and loads are never cloned here.
  if (n->is_Phi() || n->is_Load()) {
    return;
  }
  assert(worklist.size() == 0, "should be empty");
  // Collect If/CMove/Bool users in the not_peel region that consume "n"
  // as their condition input (in(1)) -- the def-use pairs the code
  // generator requires to stay within one basic block.
  for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
    Node* use = n->fast_out(j);
    if ( not_peel.test(use->_idx) &&
         (use->is_If() || use->is_CMove() || use->is_Bool()) &&
         use->in(1) == n)  {
      worklist.push(use);
    }
  }
  if (worklist.size() > 0) {
    // clone "n" and insert it between inputs of "n" and the use
    Node* n_clone = n->clone();
    loop->_body.push(n_clone);
    _igvn.register_new_node_with_optimizer(n_clone);
    set_ctrl(n_clone, get_ctrl(n));
    // The clone is destined for the not_peeled region; record it so the
    // caller can later re-anchor it (sink_list) and keep the partition valid.
    sink_list.push(n_clone);
    not_peel.set(n_clone->_idx);
#ifndef PRODUCT
    if (TracePartialPeeling) {
      tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
    }
#endif
    // Redirect every matching condition edge of the collected uses to the clone.
    while( worklist.size() ) {
      Node *use = worklist.pop();
      _igvn.rehash_node_delayed(use);
      for (uint j = 1; j < use->req(); j++) {
        if (use->in(j) == n) {
          use->set_req(j, n_clone);
        }
      }
    }
  }
}
2781
2782
2783 //------------------------------ insert_phi_for_loop -------------------------------------
2784 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
  // Build phi(lp_entry_val, back_edge_val) merging at loop head "lp".
  Node *phi = PhiNode::make(lp, back_edge_val);
  phi->set_req(LoopNode::EntryControl, lp_entry_val);
  // Use existing phi if it already exists
  Node *hit = _igvn.hash_find_insert(phi);
  if( hit == NULL ) {
    // No identical phi in the hash table; keep and register the new one.
    _igvn.register_new_node_with_optimizer(phi);
    set_ctrl(phi, lp);
  } else {
    // Remove the new phi from the graph and use the hit
    _igvn.remove_dead_node(phi);
    phi = hit;
  }
  // Rewire the requested input of "use" to the (new or existing) phi.
  _igvn.replace_input_of(use, idx, phi);
}
2800
2801 #ifdef ASSERT
2802 //------------------------------ is_valid_loop_partition -------------------------------------
2803 // Validate the loop partition sets: peel and not_peel
is_valid_loop_partition(IdealLoopTree * loop,VectorSet & peel,Node_List & peel_list,VectorSet & not_peel)2804 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
2805 VectorSet& not_peel ) {
2806 uint i;
2807 // Check that peel_list entries are in the peel set
2808 for (i = 0; i < peel_list.size(); i++) {
2809 if (!peel.test(peel_list.at(i)->_idx)) {
2810 return false;
2811 }
2812 }
2813 // Check at loop members are in one of peel set or not_peel set
2814 for (i = 0; i < loop->_body.size(); i++ ) {
2815 Node *def = loop->_body.at(i);
2816 uint di = def->_idx;
2817 // Check that peel set elements are in peel_list
2818 if (peel.test(di)) {
2819 if (not_peel.test(di)) {
2820 return false;
2821 }
2822 // Must be in peel_list also
2823 bool found = false;
2824 for (uint j = 0; j < peel_list.size(); j++) {
2825 if (peel_list.at(j)->_idx == di) {
2826 found = true;
2827 break;
2828 }
2829 }
2830 if (!found) {
2831 return false;
2832 }
2833 } else if (not_peel.test(di)) {
2834 if (peel.test(di)) {
2835 return false;
2836 }
2837 } else {
2838 return false;
2839 }
2840 }
2841 return true;
2842 }
2843
2844 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
2845 // Ensure a use outside of loop is of the right form
is_valid_clone_loop_exit_use(IdealLoopTree * loop,Node * use,uint exit_idx)2846 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) {
2847 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
2848 return (use->is_Phi() &&
2849 use_c->is_Region() && use_c->req() == 3 &&
2850 (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
2851 use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
2852 use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
2853 loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
2854 }
2855
2856 //------------------------------ is_valid_clone_loop_form -------------------------------------
2857 // Ensure that all uses outside of loop are of the right form
is_valid_clone_loop_form(IdealLoopTree * loop,Node_List & peel_list,uint orig_exit_idx,uint clone_exit_idx)2858 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
2859 uint orig_exit_idx, uint clone_exit_idx) {
2860 uint len = peel_list.size();
2861 for (uint i = 0; i < len; i++) {
2862 Node *def = peel_list.at(i);
2863
2864 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
2865 Node *use = def->fast_out(j);
2866 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
2867 if (!loop->is_member(get_loop(use_c))) {
2868 // use is not in the loop, check for correct structure
2869 if (use->in(0) == def) {
2870 // Okay
2871 } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
2872 return false;
2873 }
2874 }
2875 }
2876 }
2877 return true;
2878 }
2879 #endif
2880
2881 //------------------------------ partial_peel -------------------------------------
2882 // Partially peel (aka loop rotation) the top portion of a loop (called
2883 // the peel section below) by cloning it and placing one copy just before
2884 // the new loop head and the other copy at the bottom of the new loop.
2885 //
2886 // before after where it came from
2887 //
2888 // stmt1 stmt1
2889 // loop: stmt2 clone
2890 // stmt2 if condA goto exitA clone
2891 // if condA goto exitA new_loop: new
2892 // stmt3 stmt3 clone
2893 // if !condB goto loop if condB goto exitB clone
2894 // exitB: stmt2 orig
2895 // stmt4 if !condA goto new_loop orig
2896 // exitA: goto exitA
2897 // exitB:
2898 // stmt4
2899 // exitA:
2900 //
2901 // Step 1: find the cut point: an exit test on probable
2902 // induction variable.
2903 // Step 2: schedule (with cloning) operations in the peel
2904 // section that can be executed after the cut into
2905 // the section that is not peeled. This may need
2906 // to clone operations into exit blocks. For
2907 // instance, a reference to A[i] in the not-peel
2908 // section and a reference to B[i] in an exit block
2909 // may cause a left-shift of i by 2 to be placed
2910 // in the peel block. This step will clone the left
2911 // shift into the exit block and sink the left shift
2912 // from the peel to the not-peel section.
2913 // Step 3: clone the loop, retarget the control, and insert
2914 // phis for values that are live across the new loop
2915 // head. This is very dependent on the graph structure
2916 // from clone_loop. It creates region nodes for
2917 // exit control and associated phi nodes for values
2918 // flow out of the loop through that exit. The region
2919 // node is dominated by the clone's control projection.
2920 // So the clone's peel section is placed before the
//         new loop head, and the clone's not-peel section
//         forms the top part of the new loop. The original
2923 // peel section forms the tail of the new loop.
2924 // Step 4: update the dominator tree and recompute the
2925 // dominator depth.
2926 //
2927 // orig
2928 //
2929 // stmt1
2930 // |
2931 // v
2932 // loop predicate
2933 // |
2934 // v
2935 // loop<----+
2936 // | |
2937 // stmt2 |
2938 // | |
2939 // v |
2940 // ifA |
2941 // / | |
2942 // v v |
2943 // false true ^ <-- last_peel
2944 // / | |
2945 // / ===|==cut |
2946 // / stmt3 | <-- first_not_peel
2947 // / | |
2948 // | v |
2949 // v ifB |
2950 // exitA: / \ |
2951 // / \ |
2952 // v v |
2953 // false true |
2954 // / \ |
2955 // / ----+
2956 // |
2957 // v
2958 // exitB:
2959 // stmt4
2960 //
2961 //
2962 // after clone loop
2963 //
2964 // stmt1
2965 // |
2966 // v
2967 // loop predicate
2968 // / \
2969 // clone / \ orig
2970 // / \
2971 // / \
2972 // v v
2973 // +---->loop loop<----+
2974 // | | | |
2975 // | stmt2 stmt2 |
2976 // | | | |
2977 // | v v |
2978 // | ifA ifA |
2979 // | | \ / | |
2980 // | v v v v |
2981 // ^ true false false true ^ <-- last_peel
2982 // | | ^ \ / | |
2983 // | cut==|== \ \ / ===|==cut |
2984 // | stmt3 \ \ / stmt3 | <-- first_not_peel
2985 // | | dom | | | |
2986 // | v \ 1v v2 v |
2987 // | ifB regionA ifB |
2988 // | / \ | / \ |
2989 // | / \ v / \ |
2990 // | v v exitA: v v |
2991 // | true false false true |
2992 // | / ^ \ / \ |
2993 // +---- \ \ / ----+
2994 // dom \ /
2995 // \ 1v v2
2996 // regionB
2997 // |
2998 // v
2999 // exitB:
3000 // stmt4
3001 //
3002 //
3003 // after partial peel
3004 //
3005 // stmt1
3006 // |
3007 // v
3008 // loop predicate
3009 // /
3010 // clone / orig
3011 // / TOP
3012 // / \
3013 // v v
3014 // TOP->loop loop----+
3015 // | | |
3016 // stmt2 stmt2 |
3017 // | | |
3018 // v v |
3019 // ifA ifA |
3020 // | \ / | |
3021 // v v v v |
3022 // true false false true | <-- last_peel
3023 // | ^ \ / +------|---+
3024 // +->newloop \ \ / === ==cut | |
3025 // | stmt3 \ \ / TOP | |
3026 // | | dom | | stmt3 | | <-- first_not_peel
3027 // | v \ 1v v2 v | |
3028 // | ifB regionA ifB ^ v
3029 // | / \ | / \ | |
3030 // | / \ v / \ | |
3031 // | v v exitA: v v | |
3032 // | true false false true | |
3033 // | / ^ \ / \ | |
3034 // | | \ \ / v | |
3035 // | | dom \ / TOP | |
3036 // | | \ 1v v2 | |
3037 // ^ v regionB | |
3038 // | | | | |
3039 // | | v ^ v
3040 // | | exitB: | |
3041 // | | stmt4 | |
3042 // | +------------>-----------------+ |
3043 // | |
3044 // +-----------------<---------------------+
3045 //
3046 //
3047 // final graph
3048 //
3049 // stmt1
3050 // |
3051 // v
3052 // loop predicate
3053 // |
3054 // v
3055 // stmt2 clone
3056 // |
3057 // v
3058 // ........> ifA clone
3059 // : / |
3060 // dom / |
3061 // : v v
3062 // : false true
3063 // : | |
3064 // : | v
3065 // : | newloop<-----+
3066 // : | | |
3067 // : | stmt3 clone |
3068 // : | | |
3069 // : | v |
3070 // : | ifB |
3071 // : | / \ |
3072 // : | v v |
3073 // : | false true |
3074 // : | | | |
3075 // : | v stmt2 |
3076 // : | exitB: | |
3077 // : | stmt4 v |
3078 // : | ifA orig |
3079 // : | / \ |
3080 // : | / \ |
3081 // : | v v |
3082 // : | false true |
3083 // : | / \ |
3084 // : v v -----+
3085 // RegionA
3086 // |
3087 // v
3088 // exitA
3089 //
bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {

  assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
  if (!loop->_head->is_Loop()) {
    return false;
  }
  LoopNode *head = loop->_head->as_Loop();

  // Give up if this loop was already partially peeled, or a previous
  // attempt on it failed.
  if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
    return false;
  }

  // Check for complex exit control
  for (uint ii = 0; ii < loop->_body.size(); ii++) {
    Node *n = loop->_body.at(ii);
    int opc = n->Opcode();
    if (n->is_Call() ||
        opc == Op_Catch ||
        opc == Op_CatchProj ||
        opc == Op_Jump ||
        opc == Op_JumpProj) {
#ifndef PRODUCT
      if (TracePartialPeeling) {
        tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
      }
#endif
      return false;
    }
  }

  int dd = dom_depth(head);

  // Step 1: find cut point

  // Walk up dominators to loop head looking for first loop exit
  // which is executed on every path thru loop.
  IfNode *peel_if = NULL;       // last dominating signed (CmpI) exit test
  IfNode *peel_if_cmpu = NULL;  // last dominating unsigned (CmpU) exit test

  Node *iff = loop->tail();
  while (iff != head) {
    if (iff->is_If()) {
      Node *ctrl = get_ctrl(iff->in(1));
      if (ctrl->is_top()) return false; // Dead test on live IF.
      // If loop-varying exit-test, check for induction variable
      if (loop->is_member(get_loop(ctrl)) &&
          loop->is_loop_exit(iff) &&
          is_possible_iv_test(iff)) {
        Node* cmp = iff->in(1)->in(1);
        if (cmp->Opcode() == Op_CmpI) {
          peel_if = iff->as_If();
        } else {
          assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
          peel_if_cmpu = iff->as_If();
        }
      }
    }
    iff = idom(iff);
  }

  // Prefer signed compare over unsigned compare.
  IfNode* new_peel_if = NULL;
  if (peel_if == NULL) {
    if (!PartialPeelAtUnsignedTests || peel_if_cmpu == NULL) {
      return false; // No peel point found
    }
    // No signed test available: synthesize one before the unsigned test.
    new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
    if (new_peel_if == NULL) {
      return false; // No peel point found
    }
    peel_if = new_peel_if;
  }
  // The cut lies between last_peel (stay-in-loop successor of the peel
  // test) and first_not_peeled (its stay-in-loop successor).
  Node* last_peel = stay_in_loop(peel_if, loop);
  Node* first_not_peeled = stay_in_loop(last_peel, loop);
  if (first_not_peeled == NULL || first_not_peeled == head) {
    return false;
  }

#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("PartialPeel ");
    loop->dump_head();
  }

  if (TracePartialPeeling) {
    tty->print_cr("before partial peel one iteration");
    Node_List wl;
    Node* t = head->in(2);  // in(2) is LoopNode::LoopBackControl
    while (true) {
      wl.push(t);
      if (t == head) break;
      t = idom(t);
    }
    while (wl.size() > 0) {
      Node* tt = wl.pop();
      tt->dump();
      if (tt == last_peel) tty->print_cr("-- cut --");
    }
  }
#endif
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet peel(area);
  VectorSet not_peel(area);
  Node_List peel_list(area);
  Node_List worklist(area);
  Node_List sink_list(area);

  uint estimate = loop->est_loop_clone_sz(1);
  if (exceeding_node_budget(estimate)) {
    return false;
  }

  // Set of cfg nodes to peel are those that are executable from
  // the head through last_peel.
  assert(worklist.size() == 0, "should be empty");
  worklist.push(head);
  peel.set(head->_idx);
  while (worklist.size() > 0) {
    Node *n = worklist.pop();
    if (n != last_peel) {
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* use = n->fast_out(j);
        if (use->is_CFG() &&
            loop->is_member(get_loop(use)) &&
            !peel.test_set(use->_idx)) {
          worklist.push(use);
        }
      }
    }
  }

  // Set of non-cfg nodes to peel are those that are control
  // dependent on the cfg nodes.
  for (uint i = 0; i < loop->_body.size(); i++) {
    Node *n = loop->_body.at(i);
    Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
    if (peel.test(n_c->_idx)) {
      peel.set(n->_idx);
    } else {
      not_peel.set(n->_idx);
    }
  }

  // Step 2: move operations from the peeled section down into the
  // not-peeled section

  // Get a post order schedule of nodes in the peel region
  // Result in right-most operand.
  scheduled_nodelist(loop, peel, peel_list);

  assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");

  // For future check for too many new phis
  uint old_phi_cnt = 0;
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* use = head->fast_out(j);
    if (use->is_Phi()) old_phi_cnt++;
  }

#ifndef PRODUCT
  if (TracePartialPeeling) {
    tty->print_cr("\npeeled list");
  }
#endif

  // Evacuate nodes in peel region into the not_peeled region if possible
  uint new_phi_cnt = 0;
  uint cloned_for_outside_use = 0;
  for (uint i = 0; i < peel_list.size();) {
    Node* n = peel_list.at(i);
#ifndef PRODUCT
    if (TracePartialPeeling) n->dump();
#endif
    bool incr = true;
    if (!n->is_CFG()) {
      if (has_use_in_set(n, not_peel)) {
        // If not used internal to the peeled region,
        // move "n" from peeled to not_peeled region.
        if (!has_use_internal_to_set(n, peel, loop)) {
          // if not pinned and not a load (which maybe anti-dependent on a store)
          // and not a CMove (Matcher expects only bool->cmove).
          if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove()) {
            cloned_for_outside_use += clone_for_use_outside_loop(loop, n, worklist);
            sink_list.push(n);
            peel.remove(n->_idx);
            not_peel.set(n->_idx);
            peel_list.remove(i);
            incr = false;  // removal shifted the list; revisit index i
#ifndef PRODUCT
            if (TracePartialPeeling) {
              tty->print_cr("sink to not_peeled region: %d newbb: %d",
                            n->_idx, get_ctrl(n)->_idx);
            }
#endif
          }
        } else {
          // Otherwise check for special def-use cases that span
          // the peel/not_peel boundary such as bool->if
          clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
          new_phi_cnt++;
        }
      }
    }
    if (incr) i++;
  }

  estimate += cloned_for_outside_use + new_phi_cnt;
  bool exceed_node_budget = !may_require_nodes(estimate);
  bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;

  if (exceed_node_budget || exceed_phi_limit) {
#ifndef PRODUCT
    if (TracePartialPeeling) {
      tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c",
                    new_phi_cnt, old_phi_cnt, new_peel_if != NULL?'T':'F');
    }
#endif
    // Roll back the synthesized signed test, if any, before giving up.
    if (new_peel_if != NULL) {
      remove_cmpi_loop_exit(new_peel_if, loop);
    }
    // Inhibit more partial peeling on this loop
    assert(!head->is_partial_peel_loop(), "not partial peeled");
    head->mark_partial_peel_failed();
    if (cloned_for_outside_use > 0) {
      // Terminate this round of loop opts because
      // the graph outside this loop was changed.
      C->set_major_progress();
      return true;
    }
    return false;
  }

  // Step 3: clone loop, retarget control, and insert new phis

  // Create new loop head for new phis and to hang
  // the nodes being moved (sinked) from the peel region.
  LoopNode* new_head = new LoopNode(last_peel, last_peel);
  new_head->set_unswitch_count(head->unswitch_count()); // Preserve
  _igvn.register_new_node_with_optimizer(new_head);
  assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
  _igvn.replace_input_of(first_not_peeled, 0, new_head);
  set_loop(new_head, loop);
  loop->_body.push(new_head);
  not_peel.set(new_head->_idx);
  set_idom(new_head, last_peel, dom_depth(first_not_peeled));
  set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));

  // Re-anchor sunk/cloned nodes at the new loop head.
  while (sink_list.size() > 0) {
    Node* n = sink_list.pop();
    set_ctrl(n, new_head);
  }

  assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");

  clone_loop(loop, old_new, dd, IgnoreStripMined);

  const uint clone_exit_idx = 1;
  const uint orig_exit_idx = 2;
  assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");

  Node* head_clone = old_new[head->_idx];
  LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
  Node* orig_tail_clone = head_clone->in(2);

  // Add phi if "def" node is in peel set and "use" is not

  for (uint i = 0; i < peel_list.size(); i++) {
    Node *def = peel_list.at(i);
    if (!def->is_CFG()) {
      for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
        Node *use = def->fast_out(j);
        if (has_node(use) && use->in(0) != C->top() &&
            (!peel.test(use->_idx) ||
             (use->is_Phi() && use->in(0) == head)) ) {
          worklist.push(use);
        }
      }
      while( worklist.size() ) {
        Node *use = worklist.pop();
        for (uint j = 1; j < use->req(); j++) {
          Node* n = use->in(j);
          if (n == def) {

            // "def" is in peel set, "use" is not in peel set
            // or "use" is in the entry boundary (a phi) of the peel set

            Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;

            if ( loop->is_member(get_loop( use_c )) ) {
              // use is in loop
              if (old_new[use->_idx] != NULL) { // null for dead code
                Node* use_clone = old_new[use->_idx];
                // Cut the original edge and merge def/clone-of-def with a
                // phi at the new (cloned) loop head.
                _igvn.replace_input_of(use, j, C->top());
                insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
              }
            } else {
              assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
              // use is not in the loop, check if the live range includes the cut
              Node* lp_if = use_c->in(orig_exit_idx)->in(0);
              if (not_peel.test(lp_if->_idx)) {
                assert(j == orig_exit_idx, "use from original loop");
                insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
              }
            }
          }
        }
      }
    }
  }

  // Step 3b: retarget control

  // Redirect control to the new loop head if a cloned node in
  // the not_peeled region has control that points into the peeled region.
  // This is necessary because the cloned peeled region will be outside
  // the loop.
  //                            from    to
  //    cloned-peeled    <---+
  //    new_head_clone:      |    <--+
  //    cloned-not_peeled  in(0)    in(0)
  //    orig-peeled

  for (uint i = 0; i < loop->_body.size(); i++) {
    Node *n = loop->_body.at(i);
    if (!n->is_CFG() && n->in(0) != NULL &&
        not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
      Node* n_clone = old_new[n->_idx];
      _igvn.replace_input_of(n_clone, 0, new_head_clone);
    }
  }

  // Backedge of the surviving new_head (the clone) is original last_peel
  _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);

  // Cut first node in original not_peel set
  _igvn.rehash_node_delayed(new_head); // Multiple edge updates:
  new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of
  new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls

  // Copy head_clone back-branch info to original head
  // and remove original head's loop entry and
  // clone head's back-branch
  _igvn.rehash_node_delayed(head); // Multiple edge updates
  head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
  head->set_req(LoopNode::LoopBackControl, C->top());
  _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());

  // Similarly modify the phis
  for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
    Node* use = head->fast_out(k);
    if (use->is_Phi() && use->outcnt() > 0) {
      Node* use_clone = old_new[use->_idx];
      _igvn.rehash_node_delayed(use); // Multiple edge updates
      use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
      use->set_req(LoopNode::LoopBackControl, C->top());
      _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
    }
  }

  // Step 4: update dominator tree and dominator depth

  set_idom(head, orig_tail_clone, dd);
  recompute_dom_depth();

  // Inhibit more partial peeling on this loop
  new_head_clone->set_partial_peel_loop();
  C->set_major_progress();
  loop->record_for_igvn();

#ifndef PRODUCT
  if (TracePartialPeeling) {
    tty->print_cr("\nafter partial peel one iteration");
    Node_List wl(area);
    Node* t = last_peel;
    while (true) {
      wl.push(t);
      if (t == head_clone) break;
      t = idom(t);
    }
    while (wl.size() > 0) {
      Node* tt = wl.pop();
      if (tt == head) tty->print_cr("orig head");
      else if (tt == new_head_clone) tty->print_cr("new head");
      else if (tt == head_clone) tty->print_cr("clone head");
      tt->dump();
    }
  }
#endif
  return true;
}
3480
3481 //------------------------------reorg_offsets----------------------------------
3482 // Reorganize offset computations to lower register pressure. Mostly
3483 // prevent loop-fallout uses of the pre-incremented trip counter (which are
3484 // then alive with the post-incremented trip counter forcing an extra
3485 // register move)
void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) {
  // Perform it only for canonical counted loops.
  // Loop's shape could be messed up by iteration_split_impl.
  if (!loop->_head->is_CountedLoop())
    return;
  if (!loop->_head->as_Loop()->is_valid_counted_loop())
    return;

  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  CountedLoopEndNode *cle = cl->loopexit();
  Node *exit = cle->proj_out(false);  // fall-out (loop-exit) projection
  Node *phi = cl->phi();              // the trip-counter phi

  // Check for the special case when using the pre-incremented trip-counter on
  // the fall-out path (forces the pre-incremented and post-incremented trip
  // counter to be live at the same time). Fix this by adjusting to use the
  // post-increment trip counter.

  bool progress = true;
  while (progress) {
    progress = false;
    for (DUIterator_Fast imax, i = phi->fast_outs(imax); i < imax; i++) {
      Node* use = phi->fast_out(i); // User of trip-counter
      if (!has_ctrl(use)) continue;
      Node *u_ctrl = get_ctrl(use);
      if (use->is_Phi()) {
        // A phi's use is effectively in the predecessor block(s) feeding
        // the matching phi input(s); take the dom-LCA of those blocks.
        u_ctrl = NULL;
        for (uint j = 1; j < use->req(); j++)
          if (use->in(j) == phi)
            u_ctrl = dom_lca(u_ctrl, use->in(0)->in(j));
      }
      IdealLoopTree *u_loop = get_loop(u_ctrl);
      // Look for loop-invariant use
      if (u_loop == loop) continue;
      if (loop->is_member(u_loop)) continue;
      // Check that use is live out the bottom. Assuming the trip-counter
      // update is right at the bottom, uses of the loop middle are ok.
      if (dom_lca(exit, u_ctrl) != exit) continue;
      // Hit! Refactor use to use the post-incremented tripcounter.
      // Compute a post-increment tripcounter.
      Node* c = exit;
      if (cl->is_strip_mined()) {
        // For a strip-mined loop a use outside the outer loop must hang
        // off the outer loop's exit instead.
        IdealLoopTree* outer_loop = get_loop(cl->outer_loop());
        if (!outer_loop->is_member(u_loop)) {
          c = cl->outer_loop_exit();
        }
      }
      // Wrap the increment in Opaque2 and subtract the stride, i.e.
      // reconstruct the pre-increment value from the post-increment one.
      // NOTE(review): the Opaque2 node presumably keeps IGVN from folding
      // this back into the phi, so only the post-incremented value stays
      // live across the exit -- confirm against Opaque2Node's contract.
      Node *opaq = new Opaque2Node(C, cle->incr());
      register_new_node(opaq, c);
      Node *neg_stride = _igvn.intcon(-cle->stride_con());
      set_ctrl(neg_stride, C->root());
      Node *post = new AddINode(opaq, neg_stride);
      register_new_node(post, c);
      _igvn.rehash_node_delayed(use);
      // Retarget every input edge of "use" that referenced the phi.
      for (uint j = 1; j < use->req(); j++) {
        if (use->in(j) == phi)
          use->set_req(j, post);
      }
      // Since DU info changed, rerun loop
      progress = true;
      break;
    }
  }

}
3551