/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/callnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/movenode.hpp"


//------------------------------split_thru_region------------------------------
// Split Node 'n' through merge point.
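// One private clone of 'n' is made per control path entering 'region'; any
// input of 'n' that is defined at the merge point is rewired to its per-path
// value, and the clones are collected on a new RegionNode that is returned
// to the caller.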
Node *PhaseIdealLoop::split_thru_region( Node *n, Node *region ) {
  uint wins = 0;
  assert( n->is_CFG(), "" );
  assert( region->is_Region(), "" );
  Node *r = new RegionNode( region->req() );
  IdealLoopTree *loop = get_loop( n );
  for( uint i = 1; i < region->req(); i++ ) {
    Node *x = n->clone();
    Node *in0 = n->in(0);
    if( in0->in(0) == region ) x->set_req( 0, in0->in(i) );
    for( uint j = 1; j < n->req(); j++ ) {
      Node *in = n->in(j);
      if( get_ctrl(in) == region )
        x->set_req( j, in->in(i) );
    }
    _igvn.register_new_node_with_optimizer(x);
    set_loop(x, loop);
    set_idom(x, x->in(0), dom_depth(x->in(0))+1);
    r->init_req(i, x);
  }

  // Record region
  r->set_req(0,region);         // Not a TRUE RegionNode
  _igvn.register_new_node_with_optimizer(r);
  set_loop(r, loop);
  if( !loop->_child )
    loop->_body.push(r);
  return r;
}

//------------------------------split_up---------------------------------------
// Split block-local op up through the phis to empty the current block
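// Returns true if the graph changed: either 'n' (or one of its inputs) was
// cloned up through the merge point, or a non-simple Cmp/Bool chain was
// cloned down to its users.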
bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
  if( n->is_CFG() ) {
    assert( n->in(0) != blk1, "Lousy candidate for split-if" );
    return false;
  }
  if( get_ctrl(n) != blk1 && get_ctrl(n) != blk2 )
    return false;               // Not block local
  if( n->is_Phi() ) return false; // Local PHIs are expected

  // Recursively split-up inputs
  for (uint i = 1; i < n->req(); i++) {
    if( split_up( n->in(i), blk1, blk2 ) ) {
      // Got split recursively and self went dead?
      if (n->outcnt() == 0)
        _igvn.remove_dead_node(n);
      return true;
    }
  }

  // Check for needing to clone-up a compare. Can't do that, it forces
  // another (nested) split-if transform. Instead, clone it "down".
  if( n->is_Cmp() ) {
    assert(get_ctrl(n) == blk2 || get_ctrl(n) == blk1, "must be in block with IF");
    // Check for simple Cmp/Bool/CMove which we can clone-up. Cmp/Bool/CMove
    // sequence can have no other users and it must all reside in the split-if
    // block. Non-simple Cmp/Bool/CMove sequences are 'cloned-down' below -
    // private, per-use versions of the Cmp and Bool are made. These sink to
    // the CMove block. If the CMove is in the split-if block, then in the
    // next iteration this will become a simple Cmp/Bool/CMove set to clone-up.
    Node *bol, *cmov;
    if( !(n->outcnt() == 1 && n->unique_out()->is_Bool() &&
          (bol = n->unique_out()->as_Bool()) &&
          (get_ctrl(bol) == blk1 ||
           get_ctrl(bol) == blk2) &&
          bol->outcnt() == 1 &&
          bol->unique_out()->is_CMove() &&
          (cmov = bol->unique_out()->as_CMove()) &&
          (get_ctrl(cmov) == blk1 ||
           get_ctrl(cmov) == blk2) ) ) {

      // Must clone down
#ifndef PRODUCT
      if( PrintOpto && VerifyLoopOptimizations ) {
        tty->print("Cloning down: ");
        n->dump();
      }
#endif
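      // FastLock is a CmpNode subclass whose use is a Lock node rather than a
      // Bool, so it skips the Bool clone-down and falls through to the generic
      // CmpNode clone-down loop below.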
      if (!n->is_FastLock()) {
        // Clone down any block-local BoolNode uses of this CmpNode
        for (DUIterator i = n->outs(); n->has_out(i); i++) {
          Node* bol = n->out(i);
          assert( bol->is_Bool(), "" );
          if (bol->outcnt() == 1) {
            Node* use = bol->unique_out();
            if (use->Opcode() == Op_Opaque4) {
              if (use->outcnt() == 1) {
                Node* iff = use->unique_out();
                assert(iff->is_If(), "unexpected node type");
                Node *use_c = iff->in(0);
                if (use_c == blk1 || use_c == blk2) {
                  continue;
                }
              }
            } else {
              // We might see an Opaque1 from a loop limit check here
              assert(use->is_If() || use->is_CMove() || use->Opcode() == Op_Opaque1, "unexpected node type");
              Node *use_c = use->is_If() ? use->in(0) : get_ctrl(use);
              if (use_c == blk1 || use_c == blk2) {
                assert(use->is_CMove(), "unexpected node type");
                continue;
              }
            }
          }
          if (get_ctrl(bol) == blk1 || get_ctrl(bol) == blk2) {
            // Recursively sink any BoolNode
#ifndef PRODUCT
            if( PrintOpto && VerifyLoopOptimizations ) {
              tty->print("Cloning down: ");
              bol->dump();
            }
#endif
            for (DUIterator j = bol->outs(); bol->has_out(j); j++) {
              Node* u = bol->out(j);
              // Uses are either IfNodes, CMoves or Opaque4
              if (u->Opcode() == Op_Opaque4) {
                assert(u->in(1) == bol, "bad input");
                for (DUIterator_Last kmin, k = u->last_outs(kmin); k >= kmin; --k) {
                  Node* iff = u->last_out(k);
                  assert(iff->is_If() || iff->is_CMove(), "unexpected node type");
                  assert( iff->in(1) == u, "" );
                  // Get control block of either the CMove or the If input
                  Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
                  Node *x1 = bol->clone();
                  Node *x2 = u->clone();
                  register_new_node(x1, iff_ctrl);
                  register_new_node(x2, iff_ctrl);
                  _igvn.replace_input_of(x2, 1, x1);
                  _igvn.replace_input_of(iff, 1, x2);
                }
                _igvn.remove_dead_node(u);
                --j;
              } else {
                // We might see an Opaque1 from a loop limit check here
                assert(u->is_If() || u->is_CMove() || u->Opcode() == Op_Opaque1, "unexpected node type");
                assert(u->in(1) == bol, "");
                // Get control block of either the CMove or the If input
                Node *u_ctrl = u->is_If() ? u->in(0) : get_ctrl(u);
                assert((u_ctrl != blk1 && u_ctrl != blk2) || u->is_CMove(), "won't converge");
                Node *x = bol->clone();
                register_new_node(x, u_ctrl);
                _igvn.replace_input_of(u, 1, x);
                --j;
              }
            }
            _igvn.remove_dead_node(bol);
            --i;
          }
        }
      }
      // Clone down this CmpNode
      for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; --j) {
        Node* use = n->last_out(j);
        uint pos = 1;
        if (n->is_FastLock()) {
          pos = TypeFunc::Parms + 2;
          assert(use->is_Lock(), "FastLock only used by LockNode");
        }
        assert(use->in(pos) == n, "" );
        Node *x = n->clone();
        register_new_node(x, ctrl_or_self(use));
        _igvn.replace_input_of(use, pos, x);
      }
      _igvn.remove_dead_node( n );

      return true;
    }
  }

  // See if splitting-up a Store. Any anti-dep loads must go up as
  // well. An anti-dep load might be in the wrong block, because in
  // this particular layout/schedule we ignored anti-deps and allow
  // memory to be alive twice. This only works if we do the same
  // operations on anti-dep loads as we do their killing stores.
  if( n->is_Store() && n->in(MemNode::Memory)->in(0) == n->in(0) ) {
    // Get store's memory slice
    int alias_idx = C->get_alias_index(_igvn.type(n->in(MemNode::Address))->is_ptr());

    // Get memory-phi anti-dep loads will be using
    Node *memphi = n->in(MemNode::Memory);
    assert( memphi->is_Phi(), "" );
    // Hoist any anti-dep load to the splitting block;
    // it will then "split-up".
    for (DUIterator_Fast imax,i = memphi->fast_outs(imax); i < imax; i++) {
      Node *load = memphi->fast_out(i);
      if( load->is_Load() && alias_idx == C->get_alias_index(_igvn.type(load->in(MemNode::Address))->is_ptr()) )
        set_ctrl(load,blk1);
    }
  }

  // Found some other Node; must clone it up
#ifndef PRODUCT
  if( PrintOpto && VerifyLoopOptimizations ) {
    tty->print("Cloning up: ");
    n->dump();
  }
#endif

  // ConvI2L may have type information on it which becomes invalid if
  // it moves up in the graph, so widen the type of any clones to
  // TypeLong::INT when pushing it up.
  const Type* rtype = NULL;
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::INT) {
    rtype = TypeLong::INT;
  }

  // Now actually split-up this guy. One copy per control path merging.
  Node *phi = PhiNode::make_blank(blk1, n);
  for( uint j = 1; j < blk1->req(); j++ ) {
    Node *x = n->clone();
    // Widen the type of the ConvI2L when pushing up.
    if (rtype != NULL) x->as_Type()->set_type(rtype);
    if( n->in(0) && n->in(0) == blk1 )
      x->set_req( 0, blk1->in(j) );
    for( uint i = 1; i < n->req(); i++ ) {
      Node *m = n->in(i);
      if( get_ctrl(m) == blk1 ) {
        assert( m->in(0) == blk1, "" );
        x->set_req( i, m->in(j) );
      }
    }
    register_new_node( x, blk1->in(j) );
    phi->init_req( j, x );
  }
  // Announce phi to optimizer
  register_new_node(phi, blk1);

  // Remove cloned-up value from optimizer; use phi instead
  _igvn.replace_node( n, phi );

  // (There used to be a self-recursive call to split_up() here,
  // but it is not needed. All necessary forward walking is done
  // by do_split_if() below.)

  return true;
}

//------------------------------register_new_node------------------------------
void PhaseIdealLoop::register_new_node( Node *n, Node *blk ) {
  assert(!n->is_CFG(), "must be data node");
  _igvn.register_new_node_with_optimizer(n);
  set_ctrl(n, blk);
  IdealLoopTree *loop = get_loop(blk);
  if( !loop->_child )
    loop->_body.push(n);
}

//------------------------------small_cache------------------------------------
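// A small Dict-based cache mapping a use's control block to the def that
// reaches it; spinup() uses it to path-compress repeated dominator-tree walks.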
struct small_cache : public Dict {

  small_cache() : Dict( cmpkey, hashptr ) {}
  Node *probe( Node *use_blk ) { return (Node*)((*this)[use_blk]); }
  void lru_insert( Node *use_blk, Node *new_def ) { Insert(use_blk,new_def); }
};

//------------------------------spinup-----------------------------------------
// "Spin up" the dominator tree, starting at the use site and stopping when we
// find the post-dominating point.

// We must be at the merge point which post-dominates 'new_false' and
// 'new_true'. Figure out which edges into the RegionNode eventually lead up
// to false and which to true. Put in a PhiNode to merge values; plug in
// the appropriate false-arm or true-arm values. If some path leads to the
// original IF, then insert a Phi recursively.
Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, Node *use_blk, Node *def, small_cache *cache ) {
  if (use_blk->is_top())        // Handle dead uses
    return use_blk;
  Node *prior_n = (Node*)((intptr_t)0xdeadbeef);
  Node *n = use_blk;            // Get path input
  assert( use_blk != iff_dom, "" );
  // Here's the "spinup" the dominator tree loop. Do a cache-check
  // along the way, in case we've come this way before.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    Node *s = cache->probe( prior_n ); // Check cache
    if( s ) return s;           // Cache hit!
  }

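  // prior_n is now the last block visited before reaching iff_dom: either one
  // of the new projections (so a single arm supplies the value) or a merge
  // point below them that needs its own Phi/Region.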
  Node *phi_post;
  if( prior_n == new_false || prior_n == new_true ) {
    phi_post = def->clone();
    phi_post->set_req(0, prior_n );
    register_new_node(phi_post, prior_n);
  } else {
    // This method handles both control uses (looking for Regions) or data
    // uses (looking for Phis). If looking for a control use, then we need
    // to insert a Region instead of a Phi; however Regions always exist
    // previously (the hash_find_insert below would always hit) so we can
    // return the existing Region.
    if( def->is_CFG() ) {
      phi_post = prior_n;       // If looking for CFG, return prior
    } else {
      assert( def->is_Phi(), "" );
      assert( prior_n->is_Region(), "must be a post-dominating merge point" );

      // Need a Phi here
      phi_post = PhiNode::make_blank(prior_n, def);
      // Search for both true and false on all paths till find one.
      for( uint i = 1; i < phi_post->req(); i++ ) // For all paths
        phi_post->init_req( i, spinup( iff_dom, new_false, new_true, prior_n->in(i), def, cache ) );
      Node *t = _igvn.hash_find_insert(phi_post);
      if( t ) {                 // See if we already have this one
        // phi_post will not be used, so kill it
        _igvn.remove_dead_node(phi_post);
        phi_post->destruct();
        phi_post = t;
      } else {
        register_new_node( phi_post, prior_n );
      }
    }
  }

  // Update cache everywhere
  prior_n = (Node*)((intptr_t)0xdeadbeef);  // Reset IDOM walk
  n = use_blk;                              // Get path input
  // Spin-up the idom tree again, basically doing path-compression.
  // Insert cache entries along the way, so that if we ever hit this
  // point in the IDOM tree again we'll stop immediately on a cache hit.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    cache->lru_insert( prior_n, phi_post ); // Fill cache
  }                             // End of while not gone high enough

  return phi_post;
}

//------------------------------find_use_block---------------------------------
// Find the block a USE is in. Normally USE's are in the same block as the
// using instruction. For Phi-USE's, the USE is in the predecessor block
// along the corresponding path.
Node *PhaseIdealLoop::find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true ) {
  // CFG uses are their own block
  if( use->is_CFG() )
    return use;

  if( use->is_Phi() ) {         // Phi uses in prior block
    // Grab the first Phi use; there may be many.
    // Each will be handled as a separate iteration of
    // the "while( phi->outcnt() )" loop.
    uint j;
    for( j = 1; j < use->req(); j++ )
      if( use->in(j) == def )
        break;
    assert( j < use->req(), "def should be among use's inputs" );
    return use->in(0)->in(j);
  }
  // Normal (non-phi) use
  Node *use_blk = get_ctrl(use);
  // Some uses are directly attached to the old (and going away)
  // false and true branches.
  if( use_blk == old_false ) {
    use_blk = new_false;
    set_ctrl(use, new_false);
  }
  if( use_blk == old_true ) {
    use_blk = new_true;
    set_ctrl(use, new_true);
  }

  if (use_blk == NULL) {        // He's dead, Jim
    _igvn.replace_node(use, C->top());
  }

  return use_blk;
}

//------------------------------handle_use-------------------------------------
// Handle uses of the merge point. Basically, split-if makes the merge point
// go away so all uses of the merge point must go away as well. Most block
// local uses have already been split-up, through the merge point. Uses from
// far below the merge point can't always be split up (e.g., phi-uses are
// pinned) and it makes too much stuff live. Instead we use a path-based
// solution to move uses down.
//
// If the use is along the pre-split-CFG true branch, then the new use will
// be from the post-split-CFG true merge point. Vice-versa for the false
// path. Some uses will be along both paths; then we sink the use to the
// post-dominating location; we may need to insert a Phi there.
void PhaseIdealLoop::handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true ) {

  Node *use_blk = find_use_block(use,def,old_false,new_false,old_true,new_true);
  if( !use_blk ) return;        // He's dead, Jim

  // Walk up the dominator tree until I hit either the old IfFalse, the old
  // IfTrue or the old If. Insert Phis where needed.
  Node *new_def = spinup( region_dom, new_false, new_true, use_blk, def, cache );

  // Found where this USE goes. Re-point him.
  uint i;
  for( i = 0; i < use->req(); i++ )
    if( use->in(i) == def )
      break;
  assert( i < use->req(), "def should be among use's inputs" );
  _igvn.replace_input_of(use, i, new_def);
}

//------------------------------do_split_if------------------------------------
// Found an If getting its condition-code input from a Phi in the same block.
// Split thru the Region.
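// The transformation happens in three steps: (1) split_up() empties the merge
// block of all block-local (non-Phi) nodes, (2) the If and its two projections
// are cloned through the Region by split_thru_region(), and (3) every
// remaining use of the dying merge point and its Phis is re-pointed along the
// new true/false paths by handle_use()/spinup().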
void PhaseIdealLoop::do_split_if( Node *iff ) {
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print_cr("Split-if");
  }
  if (TraceLoopOpts) {
    tty->print_cr("SplitIf");
  }

  C->set_major_progress();
  Node *region = iff->in(0);
  Node *region_dom = idom(region);

  // We are going to clone this test (and the control flow with it) up through
  // the incoming merge point. We need to empty the current basic block.
  // Clone any instructions which must be in this block up through the merge
  // point.
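  // split_up() can add and remove users of the Region, so rescan its output
  // list until a full pass makes no further progress.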
  DUIterator i, j;
  bool progress = true;
  while (progress) {
    progress = false;
    for (i = region->outs(); region->has_out(i); i++) {
      Node* n = region->out(i);
      if( n == region ) continue;
      // The IF to be split is OK.
      if( n == iff ) continue;
      if( !n->is_Phi() ) {      // Found pinned memory op or such
        if (split_up(n, region, iff)) {
          i = region->refresh_out_pos(i);
          progress = true;
        }
        continue;
      }
      assert( n->in(0) == region, "" );

      // Recursively split up all users of a Phi
      for (j = n->outs(); n->has_out(j); j++) {
        Node* m = n->out(j);
        // If m is dead, throw it away, and declare progress
        if (_nodes[m->_idx] == NULL) {
          _igvn.remove_dead_node(m);
          // fall through
        }
        else if (m != iff && split_up(m, region, iff)) {
          // fall through
        } else {
          continue;
        }
        // Something unpredictable changed.
        // Tell the iterators to refresh themselves, and rerun the loop.
        i = region->refresh_out_pos(i);
        j = region->refresh_out_pos(j);
        progress = true;
      }
    }
  }

  // Now we have no instructions in the block containing the IF.
  // Split the IF.
  Node *new_iff = split_thru_region( iff, region );

  // Replace both uses of 'new_iff' with Regions merging True/False
  // paths. This makes 'new_iff' go dead.
  Node *old_false = NULL, *old_true = NULL;
  Node *new_false = NULL, *new_true = NULL;
  for (DUIterator_Last j2min, j2 = iff->last_outs(j2min); j2 >= j2min; --j2) {
    Node *ifp = iff->last_out(j2);
    assert( ifp->Opcode() == Op_IfFalse || ifp->Opcode() == Op_IfTrue, "" );
    ifp->set_req(0, new_iff);
    Node *ifpx = split_thru_region( ifp, region );

    // Replace 'If' projection of a Region with a Region of
    // 'If' projections.
    ifpx->set_req(0, ifpx);     // A TRUE RegionNode

    // Setup dominator info
    set_idom(ifpx, region_dom, dom_depth(region_dom) + 1);

    // Check for splitting loop tails
    if( get_loop(iff)->tail() == ifp )
      get_loop(iff)->_tail = ifpx;

    // Replace in the graph with lazy-update mechanism
    new_iff->set_req(0, new_iff); // hook self so it does not go dead
    lazy_replace(ifp, ifpx);
    new_iff->set_req(0, region);

    // Record bits for later xforms
    if( ifp->Opcode() == Op_IfFalse ) {
      old_false = ifp;
      new_false = ifpx;
    } else {
      old_true = ifp;
      new_true = ifpx;
    }
  }
  _igvn.remove_dead_node(new_iff);
  // Lazy replace IDOM info with the region's dominator
  lazy_replace(iff, region_dom);
  lazy_update(region, region_dom); // idom must be updated before handle_use
  region->set_req(0, NULL);     // Break the self-cycle. Required for lazy_update to work on region

  // Now make the original merge point go dead, by handling all its uses.
  small_cache region_cache;
  // Preload some control flow in region-cache
  region_cache.lru_insert( new_false, new_false );
  region_cache.lru_insert( new_true , new_true );
  // Now handle all uses of the splitting block
  for (DUIterator k = region->outs(); region->has_out(k); k++) {
    Node* phi = region->out(k);
    if (!phi->in(0)) {          // Dead phi? Remove it
      _igvn.remove_dead_node(phi);
    } else if (phi == region) { // Found the self-reference
      continue;                 // No roll-back of DUIterator
    } else if (phi->is_Phi()) { // Expected common case: Phi hanging off of Region
      assert(phi->in(0) == region, "Inconsistent graph");
      // Need a per-def cache. Phi represents a def, so make a cache
      small_cache phi_cache;

      // Inspect all Phi uses to make the Phi go dead
      for (DUIterator_Last lmin, l = phi->last_outs(lmin); l >= lmin; --l) {
        Node* use = phi->last_out(l);
        // Compute the new DEF for this USE. New DEF depends on the path
        // taken from the original DEF to the USE. The new DEF may be some
        // collection of PHI's merging values from different paths. The Phis
        // inserted depend only on the location of the USE. We use a
        // 2-element cache to handle multiple uses from the same block.
        handle_use(use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true);
      } // End of while phi has uses
      // Remove the dead Phi
      _igvn.remove_dead_node( phi );
    } else {
      assert(phi->in(0) == region, "Inconsistent graph");
      // Random memory op guarded by Region. Compute new DEF for USE.
      handle_use(phi, region, &region_cache, region_dom, new_false, new_true, old_false, old_true);
    }
    // Every path above deletes a use of the region, except for the region
    // self-cycle (which is needed by handle_use calling find_use_block
    // calling get_ctrl calling get_ctrl_no_update looking for dead
    // regions). So roll back the DUIterator innards.
    --k;
  } // End of while merge point has phis

  _igvn.remove_dead_node(region);

#ifndef PRODUCT
  if( VerifyLoopOptimizations ) verify();
#endif
}