1 /*
2 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "opto/loopnode.hpp"
27 #include "opto/addnode.hpp"
28 #include "opto/callnode.hpp"
29 #include "opto/connode.hpp"
30 #include "opto/convertnode.hpp"
31 #include "opto/loopnode.hpp"
32 #include "opto/matcher.hpp"
33 #include "opto/mulnode.hpp"
34 #include "opto/opaquenode.hpp"
35 #include "opto/rootnode.hpp"
36 #include "opto/subnode.hpp"
37 #include <fenv.h>
38 #include <math.h>
39
40 /*
41 * The general idea of Loop Predication is to insert a predicate on the entry
 * path to a loop, and raise an uncommon trap if the check of the condition fails.
43 * The condition checks are promoted from inside the loop body, and thus
44 * the checks inside the loop could be eliminated. Currently, loop predication
45 * optimization has been applied to remove array range check and loop invariant
46 * checks (such as null checks).
47 *
48 * There are at least 3 kinds of predicates: a place holder inserted
49 * at parse time, the tests added by predication above the place
50 * holder (referred to as concrete predicates), skeleton predicates
51 * that are added between main loop and pre loop to protect C2 from
52 * inconsistencies in some rare cases of over unrolling. Skeleton
53 * predicates themselves are expanded and updated as unrolling
54 * proceeds. They don't compile to any code.
55 *
56 */
57
58 //-------------------------------register_control-------------------------
register_control(Node * n,IdealLoopTree * loop,Node * pred,bool update_body)59 void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred, bool update_body) {
60 assert(n->is_CFG(), "msust be control node");
61 _igvn.register_new_node_with_optimizer(n);
62 if (update_body) {
63 loop->_body.push(n);
64 }
65 set_loop(n, loop);
66 // When called from beautify_loops() idom is not constructed yet.
67 if (_idom != NULL) {
68 set_idom(n, pred, dom_depth(pred));
69 }
70 }
71
72 //------------------------------create_new_if_for_predicate------------------------
73 // create a new if above the uct_if_pattern for the predicate to be promoted.
74 //
75 // before after
76 // ---------- ----------
77 // ctrl ctrl
78 // | |
79 // | |
80 // v v
81 // iff new_iff
82 // / \ / \
83 // / \ / \
84 // v v v v
85 // uncommon_proj cont_proj if_uct if_cont
86 // \ | | | |
87 // \ | | | |
88 // v v v | v
89 // rgn loop | iff
90 // | | / \
91 // | | / \
92 // v | v v
93 // uncommon_trap | uncommon_proj cont_proj
94 // \ \ | |
95 // \ \ | |
96 // v v v v
97 // rgn loop
98 // |
99 // |
100 // v
101 // uncommon_trap
102 //
103 //
104 // We will create a region to guard the uct call if there is no one there.
105 // The continuation projection (if_cont) of the new_iff is returned which
106 // is by default a true projection if 'if_cont_is_true_proj' is true.
107 // Otherwise, the continuation projection is set up to be the false
108 // projection. This code is also used to clone predicates to cloned loops.
ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                                      Deoptimization::DeoptReason reason,
                                                      int opcode, bool if_cont_is_true_proj) {
  assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
  IfNode* iff = cont_proj->in(0)->as_If();

  // The projection leading to the uncommon trap, and the region/call it feeds.
  ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
  Node *rgn = uncommon_proj->unique_ctrl_out();
  assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");

  uint proj_index = 1; // region's edge corresponding to uncommon_proj
  if (!rgn->is_Region()) { // create a region to guard the call
    assert(rgn->is_Call(), "must be call uct");
    CallNode* call = rgn->as_Call();
    IdealLoopTree* loop = get_loop(call);
    rgn = new RegionNode(1);
    Node* uncommon_proj_orig = uncommon_proj;
    // Clone the projection so the original one can be lazily replaced by the
    // new region below, carrying its pinned users along.
    uncommon_proj = uncommon_proj->clone()->as_Proj();
    register_control(uncommon_proj, loop, iff);
    rgn->add_req(uncommon_proj);
    register_control(rgn, loop, uncommon_proj);
    _igvn.replace_input_of(call, 0, rgn);
    // When called from beautify_loops() idom is not constructed yet.
    if (_idom != NULL) {
      set_idom(call, rgn, dom_depth(rgn));
    }
    // Move nodes pinned on the projection or whose control is set to
    // the projection to the region.
    lazy_replace(uncommon_proj_orig, rgn);
  } else {
    // Find region's edge corresponding to uncommon_proj
    for (; proj_index < rgn->req(); proj_index++)
      if (rgn->in(proj_index) == uncommon_proj) break;
    assert(proj_index < rgn->req(), "sanity");
  }

  Node* entry = iff->in(0);
  if (new_entry != NULL) {
    // Cloning the predicate to new location.
    entry = new_entry;
  }
  // Create new_iff with the same condition, probability and frequency as the
  // original predicate If.
  IdealLoopTree* lp = get_loop(entry);
  IfNode* new_iff = NULL;
  if (opcode == Op_If) {
    new_iff = new IfNode(entry, iff->in(1), iff->_prob, iff->_fcnt);
  } else {
    assert(opcode == Op_RangeCheck, "no other if variant here");
    new_iff = new RangeCheckNode(entry, iff->in(1), iff->_prob, iff->_fcnt);
  }
  register_control(new_iff, lp, entry);
  // Pick which projection continues into the loop (if_cont) and which one
  // branches to the uncommon trap region (if_uct).
  Node* if_cont;
  Node* if_uct;
  if (if_cont_is_true_proj) {
    if_cont = new IfTrueNode(new_iff);
    if_uct = new IfFalseNode(new_iff);
  } else {
    if_uct = new IfTrueNode(new_iff);
    if_cont = new IfFalseNode(new_iff);
  }

  if (cont_proj->is_IfFalse()) {
    // Swap
    Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp;
  }
  register_control(if_cont, lp, new_iff);
  register_control(if_uct, get_loop(rgn), new_iff);

  // if_uct to rgn
  _igvn.hash_delete(rgn);
  rgn->add_req(if_uct);
  // When called from beautify_loops() idom is not constructed yet.
  if (_idom != NULL) {
    Node* ridom = idom(rgn);
    Node* nrdom = dom_lca_internal(ridom, new_iff);
    set_idom(rgn, nrdom, dom_depth(rgn));
  }

  // If rgn has phis add new edges which has the same
  // value as on original uncommon_proj pass.
  assert(rgn->in(rgn->req() -1) == if_uct, "new edge should be last");
  bool has_phi = false;
  for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) {
    Node* use = rgn->fast_out(i);
    if (use->is_Phi() && use->outcnt() > 0) {
      assert(use->in(0) == rgn, "");
      _igvn.rehash_node_delayed(use);
      // Duplicate the value that flowed in on the old uncommon_proj edge.
      use->add_req(use->in(proj_index));
      has_phi = true;
    }
  }
  assert(!has_phi || rgn->req() > 3, "no phis when region is created");

  if (new_entry == NULL) {
    // Attach if_cont to iff
    _igvn.replace_input_of(iff, 0, if_cont);
    if (_idom != NULL) {
      set_idom(iff, if_cont, dom_depth(iff));
    }
  }
  return if_cont->as_Proj();
}
211
//--------------------------clone_predicate-----------------------
// Clone the predicate at 'predicate_proj' above 'new_entry' for an unswitched
// loop. A fresh Opaque1 node is created for the cloned predicate so it can be
// removed independently of the original one. Returns the continuation
// projection of the new predicate.
ProjNode* PhaseIdealLoop::clone_predicate_to_unswitched_loop(ProjNode* predicate_proj, Node* new_entry, Deoptimization::DeoptReason reason) {
  ProjNode* new_predicate_proj = create_new_if_for_predicate(predicate_proj, new_entry, reason, Op_If);
  IfNode* iff = new_predicate_proj->in(0)->as_If();
  Node* ctrl = iff->in(0);

  // Match original condition since predicate's projections could be swapped.
  assert(predicate_proj->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
  Node* opq = new Opaque1Node(C, predicate_proj->in(0)->in(1)->in(1)->in(1));
  C->add_predicate_opaq(opq);
  Node* bol = new Conv2BNode(opq);
  register_new_node(opq, ctrl);
  register_new_node(bol, ctrl);
  _igvn.hash_delete(iff);
  // Replace the condition of the cloned If with the new Opaque1-guarded bool.
  iff->set_req(1, bol);
  return new_predicate_proj;
}
229
// Clones skeleton predicates starting at 'old_predicate_proj' by following its control inputs and rewires the control edges of nodes
// in the loop from the old predicates to the new cloned predicates.
void PhaseIdealLoop::clone_skeleton_predicates_to_unswitched_loop(IdealLoopTree* loop, const Node_List& old_new, Deoptimization::DeoptReason reason,
                                                                  ProjNode* old_predicate_proj, ProjNode* iffast_pred, ProjNode* ifslow_pred) {
  IfNode* iff = old_predicate_proj->in(0)->as_If();
  assert(iffast_pred->in(0)->is_If() && ifslow_pred->in(0)->is_If(), "sanity check");
  ProjNode* uncommon_proj = iff->proj_out(1 - old_predicate_proj->as_Proj()->_con);
  Node* rgn = uncommon_proj->unique_ctrl_out();
  assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
  assert(iff->in(1)->in(1)->Opcode() == Op_Opaque1, "unexpected predicate shape");
  Node* predicate = iff->in(0);
  Unique_Node_List list;
  // Walk up the chain of predicates: every If/Proj pair branching to the same
  // uncommon trap region 'rgn' belongs to this predicate block.
  while (predicate != NULL && predicate->is_Proj() && predicate->in(0)->is_If()) {
    iff = predicate->in(0)->as_If();
    uncommon_proj = iff->proj_out(1 - predicate->as_Proj()->_con);
    if (uncommon_proj->unique_ctrl_out() != rgn)
      break;
    if (iff->in(1)->Opcode() == Op_Opaque4 && skeleton_predicate_has_opaque(iff)) {
      // Only need to clone range check predicates as those can be changed and duplicated by inserting pre/main/post loops
      // and doing loop unrolling. Push the original predicates on a list to later process them in reverse order to keep the
      // original predicate order.
      list.push(predicate);
    }
    predicate = predicate->in(0)->in(0);
  }

  Node_List to_process;
  // Process in reverse order such that 'create_new_if_for_predicate' can be used in 'clone_skeleton_predicate_for_unswitched_loops'
  // and the original order is maintained.
  for (int i = list.size() - 1; i >= 0; i--) {
    predicate = list.at(i);
    assert(predicate->in(0)->is_If(), "must be If node");
    iff = predicate->in(0)->as_If();
    assert(predicate->is_Proj() && predicate->as_Proj()->is_IfProj(), "predicate must be a projection of an if node");
    IfProjNode* predicate_proj = predicate->as_IfProj();

    // Clone this skeleton predicate twice: once above the fast loop and once above the slow loop.
    ProjNode* fast_proj = clone_skeleton_predicate_for_unswitched_loops(iff, predicate_proj, uncommon_proj, reason, iffast_pred, loop);
    assert(skeleton_predicate_has_opaque(fast_proj->in(0)->as_If()), "must find skeleton predicate for fast loop");
    ProjNode* slow_proj = clone_skeleton_predicate_for_unswitched_loops(iff, predicate_proj, uncommon_proj, reason, ifslow_pred, loop);
    assert(skeleton_predicate_has_opaque(slow_proj->in(0)->as_If()), "must find skeleton predicate for slow loop");

    // Update control dependent data nodes.
    for (DUIterator j = predicate->outs(); predicate->has_out(j); j++) {
      Node* fast_node = predicate->out(j);
      if (loop->is_member(get_loop(ctrl_or_self(fast_node)))) {
        assert(fast_node->in(0) == predicate, "only control edge");
        Node* slow_node = old_new[fast_node->_idx];
        assert(slow_node->in(0) == predicate, "only control edge");
        _igvn.replace_input_of(fast_node, 0, fast_proj);
        to_process.push(slow_node);
        --j; // replace_input_of removed this use; revisit the same index
      }
    }
    // Have to delay updates to the slow loop so uses of predicate are not modified while we iterate on them.
    while (to_process.size() > 0) {
      Node* slow_node = to_process.pop();
      _igvn.replace_input_of(slow_node, 0, slow_proj);
    }
  }
}
290
// Clone a skeleton predicate for an unswitched loop. OpaqueLoopInit and OpaqueLoopStride nodes are cloned and uncommon
// traps are kept for the predicate (a Halt node is used later when creating pre/main/post loops and copying this cloned
// predicate again).
ProjNode* PhaseIdealLoop::clone_skeleton_predicate_for_unswitched_loops(Node* iff, ProjNode* predicate, Node* uncommon_proj,
                                                                       Deoptimization::DeoptReason reason, ProjNode* output_proj,
                                                                       IdealLoopTree* loop) {
  // Clone the bool/cmp subtree of the skeleton predicate's condition.
  Node* bol = clone_skeleton_predicate_bool(iff, NULL, NULL, predicate, uncommon_proj, output_proj, loop);
  // Create a new predicate If above 'output_proj' and install the cloned condition.
  ProjNode* proj = create_new_if_for_predicate(output_proj, NULL, reason, iff->Opcode(), predicate->is_IfTrue());
  _igvn.replace_input_of(proj->in(0), 1, bol);
  // Rewire 'output_proj''s If below the newly created predicate and fix idom.
  _igvn.replace_input_of(output_proj->in(0), 0, proj);
  set_idom(output_proj->in(0), proj, dom_depth(proj));
  return proj;
}
304
//--------------------------clone_loop_predicates-----------------------
// Clone loop predicates to cloned loops when unswitching a loop.
// 'iffast_pred' and 'ifslow_pred' are updated in place to point below the
// newly cloned predicates of the fast and slow loop, respectively.
void PhaseIdealLoop::clone_predicates_to_unswitched_loop(IdealLoopTree* loop, const Node_List& old_new, ProjNode*& iffast_pred, ProjNode*& ifslow_pred) {
  LoopNode* head = loop->_head->as_Loop();
  // A counted loop's limit check is already finalized; don't clone it (see below).
  bool clone_limit_check = !head->is_CountedLoop();
  Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);

  // Search original predicates. The order (limit check, then profile
  // predicate, then regular predicate) mirrors their layout above the loop;
  // 'entry' is advanced past each predicate block that is found.
  ProjNode* limit_check_proj = NULL;
  limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
  if (limit_check_proj != NULL) {
    entry = skip_loop_predicates(entry);
  }
  ProjNode* profile_predicate_proj = NULL;
  ProjNode* predicate_proj = NULL;
  if (UseProfiledLoopPredicate) {
    profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
    if (profile_predicate_proj != NULL) {
      entry = skip_loop_predicates(entry);
    }
  }
  if (UseLoopPredicate) {
    predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
  }
  if (predicate_proj != NULL) { // right pattern that can be used by loop predication
    // clone predicate
    iffast_pred = clone_predicate_to_unswitched_loop(predicate_proj, iffast_pred, Deoptimization::Reason_predicate);
    ifslow_pred = clone_predicate_to_unswitched_loop(predicate_proj, ifslow_pred, Deoptimization::Reason_predicate);
    clone_skeleton_predicates_to_unswitched_loop(loop, old_new, Deoptimization::Reason_predicate, predicate_proj, iffast_pred, ifslow_pred);

    check_created_predicate_for_unswitching(iffast_pred);
    check_created_predicate_for_unswitching(ifslow_pred);
  }
  if (profile_predicate_proj != NULL) { // right pattern that can be used by loop predication
    // clone predicate
    iffast_pred = clone_predicate_to_unswitched_loop(profile_predicate_proj, iffast_pred, Deoptimization::Reason_profile_predicate);
    ifslow_pred = clone_predicate_to_unswitched_loop(profile_predicate_proj, ifslow_pred, Deoptimization::Reason_profile_predicate);
    clone_skeleton_predicates_to_unswitched_loop(loop, old_new, Deoptimization::Reason_profile_predicate, profile_predicate_proj, iffast_pred, ifslow_pred);

    check_created_predicate_for_unswitching(iffast_pred);
    check_created_predicate_for_unswitching(ifslow_pred);
  }
  if (limit_check_proj != NULL && clone_limit_check) {
    // Clone loop limit check last to insert it before loop.
    // Don't clone a limit check which was already finalized
    // for this counted loop (only one limit check is needed).
    iffast_pred = clone_predicate_to_unswitched_loop(limit_check_proj, iffast_pred, Deoptimization::Reason_loop_limit_check);
    ifslow_pred = clone_predicate_to_unswitched_loop(limit_check_proj, ifslow_pred, Deoptimization::Reason_loop_limit_check);

    check_created_predicate_for_unswitching(iffast_pred);
    check_created_predicate_for_unswitching(ifslow_pred);
  }
}
358
359 #ifndef PRODUCT
check_created_predicate_for_unswitching(const Node * new_entry) const360 void PhaseIdealLoop::check_created_predicate_for_unswitching(const Node* new_entry) const {
361 assert(new_entry != NULL, "IfTrue or IfFalse after clone predicate");
362 if (TraceLoopPredicate) {
363 tty->print("Loop Predicate cloned: ");
364 debug_only(new_entry->in(0)->dump(););
365 }
366 }
367 #endif
368
369
370 //--------------------------skip_loop_predicates------------------------------
371 // Skip related predicates.
skip_loop_predicates(Node * entry)372 Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) {
373 IfNode* iff = entry->in(0)->as_If();
374 ProjNode* uncommon_proj = iff->proj_out(1 - entry->as_Proj()->_con);
375 Node* rgn = uncommon_proj->unique_ctrl_out();
376 assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
377 entry = entry->in(0)->in(0);
378 while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) {
379 uncommon_proj = entry->in(0)->as_If()->proj_out(1 - entry->as_Proj()->_con);
380 if (uncommon_proj->unique_ctrl_out() != rgn)
381 break;
382 entry = entry->in(0)->in(0);
383 }
384 return entry;
385 }
386
skip_all_loop_predicates(Node * entry)387 Node* PhaseIdealLoop::skip_all_loop_predicates(Node* entry) {
388 Node* predicate = NULL;
389 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
390 if (predicate != NULL) {
391 entry = skip_loop_predicates(entry);
392 }
393 if (UseProfiledLoopPredicate) {
394 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
395 if (predicate != NULL) { // right pattern that can be used by loop predication
396 entry = skip_loop_predicates(entry);
397 }
398 }
399 if (UseLoopPredicate) {
400 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
401 if (predicate != NULL) { // right pattern that can be used by loop predication
402 entry = skip_loop_predicates(entry);
403 }
404 }
405 return entry;
406 }
407
408 //--------------------------find_predicate_insertion_point-------------------
409 // Find a good location to insert a predicate
find_predicate_insertion_point(Node * start_c,Deoptimization::DeoptReason reason)410 ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) {
411 if (start_c == NULL || !start_c->is_Proj())
412 return NULL;
413 if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) {
414 return start_c->as_Proj();
415 }
416 return NULL;
417 }
418
419 //--------------------------find_predicate------------------------------------
420 // Find a predicate
find_predicate(Node * entry)421 Node* PhaseIdealLoop::find_predicate(Node* entry) {
422 Node* predicate = NULL;
423 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
424 if (predicate != NULL) { // right pattern that can be used by loop predication
425 return entry;
426 }
427 if (UseLoopPredicate) {
428 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
429 if (predicate != NULL) { // right pattern that can be used by loop predication
430 return entry;
431 }
432 }
433 if (UseProfiledLoopPredicate) {
434 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
435 if (predicate != NULL) { // right pattern that can be used by loop predication
436 return entry;
437 }
438 }
439 return NULL;
440 }
441
//------------------------------Invariance-----------------------------------
// Helper class for loop_predication_impl to compute invariance on the fly and
// clone invariants.
class Invariance : public StackObj {
  VectorSet _visited, _invariant; // visited nodes / nodes proven loop-invariant
  Node_Stack _stack;              // work stack for the iterative DFS
  VectorSet _clone_visited;       // nodes already handled by clone_nodes()
  Node_List _old_new;             // map of old to new (clone)
  IdealLoopTree* _lpt;            // the loop being predicated
  PhaseIdealLoop* _phase;

  // Helper function to set up the invariance for invariance computation
  // If n is a known invariant, set up directly. Otherwise, look up the
  // possibility to push n onto the stack for further processing.
  void visit(Node* use, Node* n) {
    if (_lpt->is_invariant(n)) { // known invariant
      _invariant.set(n->_idx);
    } else if (!n->is_CFG()) {
      // Only nodes whose control dominates the use can become invariant
      // once all their inputs are shown invariant.
      Node *n_ctrl = _phase->ctrl_or_self(n);
      Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG node
      if (_phase->is_dominator(n_ctrl, u_ctrl)) {
        // Start the input scan at index 1 when there is no control input.
        _stack.push(n, n->in(0) == NULL ? 1 : 0);
      }
    }
  }

  // Compute invariance for "the_node" and (possibly) all its inputs recursively
  // on the fly
  void compute_invariance(Node* n) {
    assert(_visited.test(n->_idx), "must be");
    visit(n, n);
    while (_stack.is_nonempty()) {
      Node* n = _stack.node();
      uint idx = _stack.index();
      if (idx == n->req()) { // all inputs are processed
        _stack.pop();
        // n is invariant if its inputs are all invariant
        bool all_inputs_invariant = true;
        for (uint i = 0; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL) continue;
          assert(_visited.test(in->_idx), "must have visited input");
          if (!_invariant.test(in->_idx)) { // bad guy
            all_inputs_invariant = false;
            break;
          }
        }
        if (all_inputs_invariant) {
          // If n's control is a predicate that was moved out of the
          // loop, it was marked invariant but n is only invariant if
          // it depends only on that test. Otherwise, unless that test
          // is out of the loop, it's not invariant.
          if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == NULL || !_phase->is_member(_lpt, n->in(0))) {
            _invariant.set(n->_idx); // I am an invariant too
          }
        }
      } else { // process next input
        _stack.set_index(idx + 1);
        Node* m = n->in(idx);
        if (m != NULL && !_visited.test_set(m->_idx)) {
          visit(n, m);
        }
      }
    }
  }

  // Helper function to set up _old_new map for clone_nodes.
  // If n is a known invariant, set up directly ("clone" of n == n).
  // Otherwise, push n onto the stack for real cloning.
  void clone_visit(Node* n) {
    assert(_invariant.test(n->_idx), "must be invariant");
    if (_lpt->is_invariant(n)) { // known invariant
      _old_new.map(n->_idx, n);
    } else { // to be cloned
      assert(!n->is_CFG(), "should not see CFG here");
      _stack.push(n, n->in(0) == NULL ? 1 : 0);
    }
  }

  // Clone "n" and (possibly) all its inputs recursively
  void clone_nodes(Node* n, Node* ctrl) {
    clone_visit(n);
    while (_stack.is_nonempty()) {
      Node* n = _stack.node();
      uint idx = _stack.index();
      if (idx == n->req()) { // all inputs processed, clone n!
        _stack.pop();
        // clone invariant node
        Node* n_cl = n->clone();
        _old_new.map(n->_idx, n_cl);
        _phase->register_new_node(n_cl, ctrl);
        // Rewire the clone's inputs to the clones of its inputs.
        for (uint i = 0; i < n->req(); i++) {
          Node* in = n_cl->in(i);
          if (in == NULL) continue;
          n_cl->set_req(i, _old_new[in->_idx]);
        }
      } else { // process next input
        _stack.set_index(idx + 1);
        Node* m = n->in(idx);
        if (m != NULL && !_clone_visited.test_set(m->_idx)) {
          clone_visit(m); // visit the input
        }
      }
    }
  }

 public:
  Invariance(Arena* area, IdealLoopTree* lpt) :
    _visited(area), _invariant(area),
    _stack(area, 10 /* guess */),
    _clone_visited(area), _old_new(area),
    _lpt(lpt), _phase(lpt->_phase)
  {
    LoopNode* head = _lpt->_head->as_Loop();
    Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
    if (entry->outcnt() != 1) {
      // If a node is pinned between the predicates and the loop
      // entry, we won't be able to move any node in the loop that
      // depends on it above it in a predicate. Mark all those nodes
      // as non loop invariant.
      Unique_Node_List wq;
      wq.push(entry);
      for (uint next = 0; next < wq.size(); ++next) {
        Node *n = wq.at(next);
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node* u = n->fast_out(i);
          if (!u->is_CFG()) {
            Node* c = _phase->get_ctrl(u);
            if (_lpt->is_member(_phase->get_loop(c)) || _phase->is_dominator(c, head)) {
              // Pre-mark as visited (but NOT invariant) so that
              // compute_invariance() never reconsiders these nodes.
              _visited.set(u->_idx);
              wq.push(u);
            }
          }
        }
      }
    }
  }

  // Map old to n for invariance computation and clone
  void map_ctrl(Node* old, Node* n) {
    assert(old->is_CFG() && n->is_CFG(), "must be");
    _old_new.map(old->_idx, n); // "clone" of old is n
    _invariant.set(old->_idx); // old is invariant
    _clone_visited.set(old->_idx);
  }

  // Driver function to compute invariance
  bool is_invariant(Node* n) {
    if (!_visited.test_set(n->_idx))
      compute_invariance(n);
    return (_invariant.test(n->_idx) != 0);
  }

  // Driver function to clone invariant
  Node* clone(Node* n, Node* ctrl) {
    assert(ctrl->is_CFG(), "must be");
    assert(_invariant.test(n->_idx), "must be an invariant");
    if (!_clone_visited.test(n->_idx))
      clone_nodes(n, ctrl);
    return _old_new[n->_idx];
  }
};
604
605 //------------------------------is_range_check_if -----------------------------------
606 // Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format
607 // Note: this function is particularly designed for loop predication. We require load_range
608 // and offset to be loop invariant computed on the fly by "invar"
is_range_check_if(IfNode * iff,PhaseIdealLoop * phase,Invariance & invar) const609 bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const {
610 if (!is_loop_exit(iff)) {
611 return false;
612 }
613 if (!iff->in(1)->is_Bool()) {
614 return false;
615 }
616 const BoolNode *bol = iff->in(1)->as_Bool();
617 if (bol->_test._test != BoolTest::lt) {
618 return false;
619 }
620 if (!bol->in(1)->is_Cmp()) {
621 return false;
622 }
623 const CmpNode *cmp = bol->in(1)->as_Cmp();
624 if (cmp->Opcode() != Op_CmpU) {
625 return false;
626 }
627 Node* range = cmp->in(2);
628 if (range->Opcode() != Op_LoadRange && !iff->is_RangeCheck()) {
629 const TypeInt* tint = phase->_igvn.type(range)->isa_int();
630 if (tint == NULL || tint->empty() || tint->_lo < 0) {
631 // Allow predication on positive values that aren't LoadRanges.
632 // This allows optimization of loops where the length of the
633 // array is a known value and doesn't need to be loaded back
634 // from the array.
635 return false;
636 }
637 }
638 if (!invar.is_invariant(range)) {
639 return false;
640 }
641 Node *iv = _head->as_CountedLoop()->phi();
642 int scale = 0;
643 Node *offset = NULL;
644 if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) {
645 return false;
646 }
647 if (offset && !invar.is_invariant(offset)) { // offset must be invariant
648 return false;
649 }
650 return true;
651 }
652
//------------------------------rc_predicate-----------------------------------
// Create a range check predicate
//
// for (i = init; i < limit; i += stride) {
//   a[scale*i+offset]
// }
//
// Compute max(scale*i + offset) for init <= i < limit and build the predicate
// as "max(scale*i + offset) u< a.length".
//
// There are two cases for max(scale*i + offset):
// (1) stride*scale > 0
//   max(scale*i + offset) = scale*(limit-stride) + offset
// (2) stride*scale < 0
//   max(scale*i + offset) = scale*init + offset
//
// The out parameter 'overflow' is set when the index computation had to be
// widened to long because 32-bit arithmetic could overflow; in that case an
// unsigned long comparison (CmpUL) is built instead of CmpU.
BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl,
                                       int scale, Node* offset,
                                       Node* init, Node* limit, jint stride,
                                       Node* range, bool upper, bool &overflow) {
  // Constant values of the inputs when they are Con nodes (0 otherwise).
  jint con_limit = (limit != NULL && limit->is_Con()) ? limit->get_int() : 0;
  jint con_init = init->is_Con() ? init->get_int() : 0;
  jint con_offset = offset->is_Con() ? offset->get_int() : 0;

  stringStream* predString = NULL;
  if (TraceLoopPredicate) {
    predString = new stringStream();
    predString->print("rc_predicate ");
  }

  overflow = false;
  Node* max_idx_expr = NULL;
  const TypeInt* idx_type = TypeInt::INT;
  // Select case (1) vs (2) above, combined with which bound (upper/lower)
  // is being checked. Note: parses as ((stride > 0) == (scale > 0)) == upper.
  if ((stride > 0) == (scale > 0) == upper) {
    guarantee(limit != NULL, "sanity");
    if (TraceLoopPredicate) {
      if (limit->is_Con()) {
        predString->print("(%d ", con_limit);
      } else {
        predString->print("(limit ");
      }
      predString->print("- %d) ", stride);
    }
    // Check if (limit - stride) may overflow
    const TypeInt* limit_type = _igvn.type(limit)->isa_int();
    jint limit_lo = limit_type->_lo;
    jint limit_hi = limit_type->_hi;
    if ((stride > 0 && (java_subtract(limit_lo, stride) < limit_lo)) ||
        (stride < 0 && (java_subtract(limit_hi, stride) > limit_hi))) {
      // No overflow possible
      ConINode* con_stride = _igvn.intcon(stride);
      set_ctrl(con_stride, C->root());
      max_idx_expr = new SubINode(limit, con_stride);
      idx_type = TypeInt::make(limit_lo - stride, limit_hi - stride, limit_type->_widen);
    } else {
      // May overflow: widen to long arithmetic.
      overflow = true;
      limit = new ConvI2LNode(limit);
      register_new_node(limit, ctrl);
      ConLNode* con_stride = _igvn.longcon(stride);
      set_ctrl(con_stride, C->root());
      max_idx_expr = new SubLNode(limit, con_stride);
    }
    register_new_node(max_idx_expr, ctrl);
  } else {
    if (TraceLoopPredicate) {
      if (init->is_Con()) {
        predString->print("%d ", con_init);
      } else {
        predString->print("init ");
      }
    }
    idx_type = _igvn.type(init)->isa_int();
    max_idx_expr = init;
  }

  if (scale != 1) {
    ConNode* con_scale = _igvn.intcon(scale);
    set_ctrl(con_scale, C->root());
    if (TraceLoopPredicate) {
      predString->print("* %d ", scale);
    }
    // Check if (scale * max_idx_expr) may overflow
    const TypeInt* scale_type = TypeInt::make(scale);
    MulINode* mul = new MulINode(max_idx_expr, con_scale);
    idx_type = (TypeInt*)mul->mul_ring(idx_type, scale_type);
    if (overflow || TypeInt::INT->higher_equal(idx_type)) {
      // May overflow: discard the tentative int multiply and widen to long.
      mul->destruct(&_igvn);
      if (!overflow) {
        max_idx_expr = new ConvI2LNode(max_idx_expr);
        register_new_node(max_idx_expr, ctrl);
      }
      overflow = true;
      con_scale = _igvn.longcon(scale);
      set_ctrl(con_scale, C->root());
      max_idx_expr = new MulLNode(max_idx_expr, con_scale);
    } else {
      // No overflow possible
      max_idx_expr = mul;
    }
    register_new_node(max_idx_expr, ctrl);
  }

  // Skip the addition entirely when the offset is a constant zero.
  if (offset && (!offset->is_Con() || con_offset != 0)){
    if (TraceLoopPredicate) {
      if (offset->is_Con()) {
        predString->print("+ %d ", con_offset);
      } else {
        predString->print("+ offset");
      }
    }
    // Check if (max_idx_expr + offset) may overflow
    const TypeInt* offset_type = _igvn.type(offset)->isa_int();
    jint lo = java_add(idx_type->_lo, offset_type->_lo);
    jint hi = java_add(idx_type->_hi, offset_type->_hi);
    if (overflow || (lo > hi) ||
        ((idx_type->_lo & offset_type->_lo) < 0 && lo >= 0) ||
        ((~(idx_type->_hi | offset_type->_hi)) < 0 && hi < 0)) {
      // May overflow: widen to long arithmetic.
      if (!overflow) {
        max_idx_expr = new ConvI2LNode(max_idx_expr);
        register_new_node(max_idx_expr, ctrl);
      }
      overflow = true;
      offset = new ConvI2LNode(offset);
      register_new_node(offset, ctrl);
      max_idx_expr = new AddLNode(max_idx_expr, offset);
    } else {
      // No overflow possible
      max_idx_expr = new AddINode(max_idx_expr, offset);
    }
    register_new_node(max_idx_expr, ctrl);
  }

  CmpNode* cmp = NULL;
  if (overflow) {
    // Integer expressions may overflow, do long comparison
    range = new ConvI2LNode(range);
    register_new_node(range, ctrl);
    cmp = new CmpULNode(max_idx_expr, range);
  } else {
    cmp = new CmpUNode(max_idx_expr, range);
  }
  register_new_node(cmp, ctrl);
  BoolNode* bol = new BoolNode(cmp, BoolTest::lt);
  register_new_node(bol, ctrl);

  if (TraceLoopPredicate) {
    predString->print_cr("<u range");
    tty->print("%s", predString->base());
    predString->~stringStream();
  }
  return bol;
}
807
808 // Should loop predication look not only in the path from tail to head
809 // but also in branches of the loop body?
loop_predication_should_follow_branches(IdealLoopTree * loop,ProjNode * predicate_proj,float & loop_trip_cnt)810 bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree *loop, ProjNode *predicate_proj, float& loop_trip_cnt) {
811 if (!UseProfiledLoopPredicate) {
812 return false;
813 }
814
815 if (predicate_proj == NULL) {
816 return false;
817 }
818
819 LoopNode* head = loop->_head->as_Loop();
820 bool follow_branches = true;
821 IdealLoopTree* l = loop->_child;
822 // For leaf loops and loops with a single inner loop
823 while (l != NULL && follow_branches) {
824 IdealLoopTree* child = l;
825 if (child->_child != NULL &&
826 child->_head->is_OuterStripMinedLoop()) {
827 assert(child->_child->_next == NULL, "only one inner loop for strip mined loop");
828 assert(child->_child->_head->is_CountedLoop() && child->_child->_head->as_CountedLoop()->is_strip_mined(), "inner loop should be strip mined");
829 child = child->_child;
830 }
831 if (child->_child != NULL || child->_irreducible) {
832 follow_branches = false;
833 }
834 l = l->_next;
835 }
836 if (follow_branches) {
837 loop->compute_profile_trip_cnt(this);
838 if (head->is_profile_trip_failed()) {
839 follow_branches = false;
840 } else {
841 loop_trip_cnt = head->profile_trip_cnt();
842 if (head->is_CountedLoop()) {
843 CountedLoopNode* cl = head->as_CountedLoop();
844 if (cl->phi() != NULL) {
845 const TypeInt* t = _igvn.type(cl->phi())->is_int();
846 float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS(cl->stride_con());
847 if (worst_case_trip_cnt < loop_trip_cnt) {
848 loop_trip_cnt = worst_case_trip_cnt;
849 }
850 }
851 }
852 }
853 }
854 return follow_branches;
855 }
856
// Compute probability of reaching some CFG node from a fixed
// dominating CFG node
class PathFrequency {
private:
  Node* _dom; // frequencies are computed relative to this node
  Node_Stack _stack;
  GrowableArray<float> _freqs_stack; // keep track of intermediate result at regions
  GrowableArray<float> _freqs; // cache frequencies
  PhaseIdealLoop* _phase;

  // Set the FP rounding mode. Frequencies are products/sums of floats in
  // [0,1]; rounding is controlled so accumulated error cannot push a
  // frequency above 1 (which would trip the asserts below).
  void set_rounding(int mode) {
    // fesetround is broken on windows
    NOT_WINDOWS(fesetround(mode);)
  }

  // Sanity check: a frequency is a probability, so it must be in [0,1].
  // Only checked where rounding is controlled (i.e. not on Windows).
  void check_frequency(float f) {
    NOT_WINDOWS(assert(f <= 1 && f >= 0, "Incorrect frequency");)
  }

public:
  PathFrequency(Node* dom, PhaseIdealLoop* phase)
    : _dom(dom), _stack(0), _phase(phase) {
  }

  // Return the frequency (in [0,1]) of reaching node n from _dom.
  // Region nodes sum the frequencies of their inputs; projections of
  // If/Jump nodes multiply by the branch probability. Results are
  // memoized in _freqs (indexed by node _idx, -1 = not computed).
  float to(Node* n) {
    // post order walk on the CFG graph from n to _dom
    set_rounding(FE_TOWARDZERO); // make sure rounding doesn't push frequency above 1
    IdealLoopTree* loop = _phase->get_loop(_dom);
    Node* c = n;
    for (;;) {
      assert(_phase->get_loop(c) == loop, "have to be in the same loop");
      if (c == _dom || _freqs.at_grow(c->_idx, -1) >= 0) {
        // Reached _dom or a node with a cached frequency: propagate
        // that frequency back through the nodes pending on _stack.
        float f = c == _dom ? 1 : _freqs.at(c->_idx);
        Node* prev = c;
        while (_stack.size() > 0 && prev == c) {
          Node* n = _stack.node();
          if (!n->is_Region()) {
            if (_phase->get_loop(n) != _phase->get_loop(n->in(0))) {
              // Found an inner loop: compute frequency of reaching this
              // exit from the loop head by looking at the number of
              // times each loop exit was taken
              IdealLoopTree* inner_loop = _phase->get_loop(n->in(0));
              LoopNode* inner_head = inner_loop->_head->as_Loop();
              assert(_phase->get_loop(n) == loop, "only 1 inner loop");
              if (inner_head->is_OuterStripMinedLoop()) {
                // Look through the outer strip mined loop to the real
                // (counted) inner loop.
                inner_head->verify_strip_mined(1);
                if (n->in(0) == inner_head->in(LoopNode::LoopBackControl)->in(0)) {
                  n = n->in(0)->in(0)->in(0);
                }
                inner_loop = inner_loop->_child;
                inner_head = inner_loop->_head->as_Loop();
                inner_head->verify_strip_mined(1);
              }
              set_rounding(FE_UPWARD); // make sure rounding doesn't push frequency above 1
              // Total count over all exits of the inner loop.
              float loop_exit_cnt = 0.0f;
              for (uint i = 0; i < inner_loop->_body.size(); i++) {
                Node *n = inner_loop->_body[i];
                float c = inner_loop->compute_profile_trip_cnt_helper(n);
                loop_exit_cnt += c;
              }
              set_rounding(FE_TOWARDZERO);
              // Count for this particular exit, from the branch
              // probability and execution count of its If/Jump.
              float cnt = -1;
              if (n->in(0)->is_If()) {
                IfNode* iff = n->in(0)->as_If();
                float p = n->in(0)->as_If()->_prob;
                if (n->Opcode() == Op_IfFalse) {
                  p = 1 - p;
                }
                if (p > PROB_MIN) {
                  cnt = p * iff->_fcnt;
                } else {
                  cnt = 0;
                }
              } else {
                assert(n->in(0)->is_Jump(), "unsupported node kind");
                JumpNode* jmp = n->in(0)->as_Jump();
                float p = n->in(0)->as_Jump()->_probs[n->as_JumpProj()->_con];
                cnt = p * jmp->_fcnt;
              }
              // Fraction of inner-loop executions leaving through this exit.
              float this_exit_f = cnt > 0 ? cnt / loop_exit_cnt : 0;
              check_frequency(this_exit_f);
              f = f * this_exit_f;
              check_frequency(f);
            } else {
              // Ordinary projection in the same loop: scale by the
              // probability of taking this branch.
              float p = -1;
              if (n->in(0)->is_If()) {
                p = n->in(0)->as_If()->_prob;
                if (n->Opcode() == Op_IfFalse) {
                  p = 1 - p;
                }
              } else {
                assert(n->in(0)->is_Jump(), "unsupported node kind");
                p = n->in(0)->as_Jump()->_probs[n->as_JumpProj()->_con];
              }
              f = f * p;
              check_frequency(f);
            }
            _freqs.at_put_grow(n->_idx, (float)f, -1);
            _stack.pop();
          } else {
            // Region node: sum the frequencies of its inputs. The
            // partial sum is kept on _freqs_stack while the remaining
            // inputs are walked one at a time.
            float prev_f = _freqs_stack.pop();
            float new_f = f;
            f = new_f + prev_f;
            check_frequency(f);
            uint i = _stack.index();
            if (i < n->req()) {
              // More inputs to process: descend into the next one.
              c = n->in(i);
              _stack.set_index(i+1);
              _freqs_stack.push(f);
            } else {
              // All inputs processed: record the region's frequency.
              _freqs.at_put_grow(n->_idx, f, -1);
              _stack.pop();
            }
          }
        }
        if (_stack.size() == 0) {
          // Walk complete: restore default rounding and return.
          set_rounding(FE_TONEAREST);
          check_frequency(f);
          return f;
        }
      } else if (c->is_Loop()) {
        ShouldNotReachHere();
        c = c->in(LoopNode::EntryControl);
      } else if (c->is_Region()) {
        // Start processing a region: push with partial sum 0, first input.
        _freqs_stack.push(0);
        _stack.push(c, 2);
        c = c->in(1);
      } else {
        if (c->is_IfProj()) {
          IfNode* iff = c->in(0)->as_If();
          if (iff->_prob == PROB_UNKNOWN) {
            // assume never taken
            _freqs.at_put_grow(c->_idx, 0, -1);
          } else if (_phase->get_loop(c) != _phase->get_loop(iff)) {
            if (iff->_fcnt == COUNT_UNKNOWN) {
              // assume never taken
              _freqs.at_put_grow(c->_idx, 0, -1);
            } else {
              // skip over loop
              _stack.push(c, 1);
              c = _phase->get_loop(c->in(0))->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
            }
          } else {
            _stack.push(c, 1);
            c = iff;
          }
        } else if (c->is_JumpProj()) {
          JumpNode* jmp = c->in(0)->as_Jump();
          if (_phase->get_loop(c) != _phase->get_loop(jmp)) {
            if (jmp->_fcnt == COUNT_UNKNOWN) {
              // assume never taken
              _freqs.at_put_grow(c->_idx, 0, -1);
            } else {
              // skip over loop
              _stack.push(c, 1);
              c = _phase->get_loop(c->in(0))->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
            }
          } else {
            _stack.push(c, 1);
            c = jmp;
          }
        } else if (c->Opcode() == Op_CatchProj &&
                   c->in(0)->Opcode() == Op_Catch &&
                   c->in(0)->in(0)->is_Proj() &&
                   c->in(0)->in(0)->in(0)->is_Call()) {
          // assume exceptions are never thrown
          uint con = c->as_Proj()->_con;
          if (con == CatchProjNode::fall_through_index) {
            Node* call = c->in(0)->in(0)->in(0)->in(0);
            if (_phase->get_loop(call) != _phase->get_loop(c)) {
              _freqs.at_put_grow(c->_idx, 0, -1);
            } else {
              c = call;
            }
          } else {
            assert(con >= CatchProjNode::catch_all_index, "what else?");
            _freqs.at_put_grow(c->_idx, 0, -1);
          }
        } else if (c->unique_ctrl_out() == NULL && !c->is_If() && !c->is_Jump()) {
          ShouldNotReachHere();
        } else {
          // Plain control node: step to its control input.
          c = c->in(0);
        }
      }
    }
    ShouldNotReachHere();
    return -1;
  }
};
1046
loop_predication_follow_branches(Node * n,IdealLoopTree * loop,float loop_trip_cnt,PathFrequency & pf,Node_Stack & stack,VectorSet & seen,Node_List & if_proj_list)1047 void PhaseIdealLoop::loop_predication_follow_branches(Node *n, IdealLoopTree *loop, float loop_trip_cnt,
1048 PathFrequency& pf, Node_Stack& stack, VectorSet& seen,
1049 Node_List& if_proj_list) {
1050 assert(n->is_Region(), "start from a region");
1051 Node* tail = loop->tail();
1052 stack.push(n, 1);
1053 do {
1054 Node* c = stack.node();
1055 assert(c->is_Region() || c->is_IfProj(), "only region here");
1056 uint i = stack.index();
1057
1058 if (i < c->req()) {
1059 stack.set_index(i+1);
1060 Node* in = c->in(i);
1061 while (!is_dominator(in, tail) && !seen.test_set(in->_idx)) {
1062 IdealLoopTree* in_loop = get_loop(in);
1063 if (in_loop != loop) {
1064 in = in_loop->_head->in(LoopNode::EntryControl);
1065 } else if (in->is_Region()) {
1066 stack.push(in, 1);
1067 break;
1068 } else if (in->is_IfProj() &&
1069 in->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1070 (in->in(0)->Opcode() == Op_If ||
1071 in->in(0)->Opcode() == Op_RangeCheck)) {
1072 if (pf.to(in) * loop_trip_cnt >= 1) {
1073 stack.push(in, 1);
1074 }
1075 in = in->in(0);
1076 } else {
1077 in = in->in(0);
1078 }
1079 }
1080 } else {
1081 if (c->is_IfProj()) {
1082 if_proj_list.push(c);
1083 }
1084 stack.pop();
1085 }
1086
1087 } while (stack.size() > 0);
1088 }
1089
1090
loop_predication_impl_helper(IdealLoopTree * loop,ProjNode * proj,ProjNode * predicate_proj,CountedLoopNode * cl,ConNode * zero,Invariance & invar,Deoptimization::DeoptReason reason)1091 bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* proj, ProjNode *predicate_proj,
1092 CountedLoopNode *cl, ConNode* zero, Invariance& invar,
1093 Deoptimization::DeoptReason reason) {
1094 // Following are changed to nonnull when a predicate can be hoisted
1095 ProjNode* new_predicate_proj = NULL;
1096 IfNode* iff = proj->in(0)->as_If();
1097 Node* test = iff->in(1);
1098 if (!test->is_Bool()){ //Conv2B, ...
1099 return false;
1100 }
1101 BoolNode* bol = test->as_Bool();
1102 if (invar.is_invariant(bol)) {
1103 // Invariant test
1104 new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL,
1105 reason,
1106 iff->Opcode());
1107 Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
1108 BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
1109
1110 // Negate test if necessary
1111 bool negated = false;
1112 if (proj->_con != predicate_proj->_con) {
1113 new_predicate_bol = new BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
1114 register_new_node(new_predicate_bol, ctrl);
1115 negated = true;
1116 }
1117 IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If();
1118 _igvn.hash_delete(new_predicate_iff);
1119 new_predicate_iff->set_req(1, new_predicate_bol);
1120 #ifndef PRODUCT
1121 if (TraceLoopPredicate) {
1122 tty->print("Predicate invariant if%s: %d ", negated ? " negated" : "", new_predicate_iff->_idx);
1123 loop->dump_head();
1124 } else if (TraceLoopOpts) {
1125 tty->print("Predicate IC ");
1126 loop->dump_head();
1127 }
1128 #endif
1129 } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
1130 // Range check for counted loops
1131 const Node* cmp = bol->in(1)->as_Cmp();
1132 Node* idx = cmp->in(1);
1133 assert(!invar.is_invariant(idx), "index is variant");
1134 Node* rng = cmp->in(2);
1135 assert(rng->Opcode() == Op_LoadRange || iff->is_RangeCheck() || _igvn.type(rng)->is_int()->_lo >= 0, "must be");
1136 assert(invar.is_invariant(rng), "range must be invariant");
1137 int scale = 1;
1138 Node* offset = zero;
1139 bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
1140 assert(ok, "must be index expression");
1141
1142 Node* init = cl->init_trip();
1143 // Limit is not exact.
1144 // Calculate exact limit here.
1145 // Note, counted loop's test is '<' or '>'.
1146 Node* limit = exact_limit(loop);
1147 int stride = cl->stride()->get_int();
1148
1149 // Build if's for the upper and lower bound tests. The
1150 // lower_bound test will dominate the upper bound test and all
1151 // cloned or created nodes will use the lower bound test as
1152 // their declared control.
1153
1154 // Perform cloning to keep Invariance state correct since the
1155 // late schedule will place invariant things in the loop.
1156 Node *ctrl = predicate_proj->in(0)->as_If()->in(0);
1157 rng = invar.clone(rng, ctrl);
1158 if (offset && offset != zero) {
1159 assert(invar.is_invariant(offset), "offset must be loop invariant");
1160 offset = invar.clone(offset, ctrl);
1161 }
1162 // If predicate expressions may overflow in the integer range, longs are used.
1163 bool overflow = false;
1164
1165 // Test the lower bound
1166 BoolNode* lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false, overflow);
1167 // Negate test if necessary
1168 bool negated = false;
1169 if (proj->_con != predicate_proj->_con) {
1170 lower_bound_bol = new BoolNode(lower_bound_bol->in(1), lower_bound_bol->_test.negate());
1171 register_new_node(lower_bound_bol, ctrl);
1172 negated = true;
1173 }
1174 ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
1175 IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
1176 _igvn.hash_delete(lower_bound_iff);
1177 lower_bound_iff->set_req(1, lower_bound_bol);
1178 if (TraceLoopPredicate) tty->print_cr("lower bound check if: %s %d ", negated ? " negated" : "", lower_bound_iff->_idx);
1179
1180 // Test the upper bound
1181 BoolNode* upper_bound_bol = rc_predicate(loop, lower_bound_proj, scale, offset, init, limit, stride, rng, true, overflow);
1182 negated = false;
1183 if (proj->_con != predicate_proj->_con) {
1184 upper_bound_bol = new BoolNode(upper_bound_bol->in(1), upper_bound_bol->_test.negate());
1185 register_new_node(upper_bound_bol, ctrl);
1186 negated = true;
1187 }
1188 ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
1189 assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
1190 IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
1191 _igvn.hash_delete(upper_bound_iff);
1192 upper_bound_iff->set_req(1, upper_bound_bol);
1193 if (TraceLoopPredicate) tty->print_cr("upper bound check if: %s %d ", negated ? " negated" : "", lower_bound_iff->_idx);
1194
1195 // Fall through into rest of the clean up code which will move
1196 // any dependent nodes onto the upper bound test.
1197 new_predicate_proj = upper_bound_proj;
1198
1199 if (iff->is_RangeCheck()) {
1200 new_predicate_proj = insert_initial_skeleton_predicate(iff, loop, proj, predicate_proj, upper_bound_proj, scale, offset, init, limit, stride, rng, overflow, reason);
1201 }
1202
1203 #ifndef PRODUCT
1204 if (TraceLoopOpts && !TraceLoopPredicate) {
1205 tty->print("Predicate RC ");
1206 loop->dump_head();
1207 }
1208 #endif
1209 } else {
1210 // Loop variant check (for example, range check in non-counted loop)
1211 // with uncommon trap.
1212 return false;
1213 }
1214 assert(new_predicate_proj != NULL, "sanity");
1215 // Success - attach condition (new_predicate_bol) to predicate if
1216 invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate
1217
1218 // Eliminate the old If in the loop body
1219 dominated_by( new_predicate_proj, iff, proj->_con != new_predicate_proj->_con );
1220
1221 C->set_major_progress();
1222 return true;
1223 }
1224
1225
// After pre/main/post loops are created, we'll put a copy of some
// range checks between the pre and main loop to validate the value
// of the main loop induction variable. Make a copy of the predicates
// here with an opaque node as a place holder for the value (will be
// updated by PhaseIdealLoop::clone_skeleton_predicate()).
// Two skeleton predicates are inserted below predicate_proj: one for
// the loop's initial iv value, one for init plus the unrolled-stride
// delta. Returns the projection of the last predicate inserted.
ProjNode* PhaseIdealLoop::insert_initial_skeleton_predicate(IfNode* iff, IdealLoopTree *loop,
                                                            ProjNode* proj, ProjNode *predicate_proj,
                                                            ProjNode* upper_bound_proj,
                                                            int scale, Node* offset,
                                                            Node* init, Node* limit, jint stride,
                                                            Node* rng, bool &overflow,
                                                            Deoptimization::DeoptReason reason) {
  // First predicate for the initial value on first loop iteration
  assert(proj->_con && predicate_proj->_con, "not a range check?");
  // OpaqueLoopInit stands in for the init value so the predicate can be
  // updated later without IGVN folding it prematurely.
  Node* opaque_init = new OpaqueLoopInitNode(C, init);
  register_new_node(opaque_init, upper_bound_proj);
  BoolNode* bol = rc_predicate(loop, upper_bound_proj, scale, offset, opaque_init, limit, stride, rng, (stride > 0) != (scale > 0), overflow);
  Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); // This will go away once loop opts are over
  register_new_node(opaque_bol, upper_bound_proj);
  ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
  _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
  assert(opaque_init->outcnt() > 0, "should be used");

  // Second predicate for init + (current stride - initial stride)
  // This is identical to the previous predicate initially but as
  // unrolling proceeds current stride is updated.
  Node* init_stride = loop->_head->as_CountedLoop()->stride();
  Node* opaque_stride = new OpaqueLoopStrideNode(C, init_stride);
  register_new_node(opaque_stride, new_proj);
  // max_value = opaque_init + (opaque_stride - init_stride)
  Node* max_value = new SubINode(opaque_stride, init_stride);
  register_new_node(max_value, new_proj);
  max_value = new AddINode(opaque_init, max_value);
  register_new_node(max_value, new_proj);
  bol = rc_predicate(loop, new_proj, scale, offset, max_value, limit, stride, rng, (stride > 0) != (scale > 0), overflow);
  opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1));
  register_new_node(opaque_bol, new_proj);
  new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
  _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
  assert(max_value->outcnt() > 0, "should be used");

  return new_proj;
}
1268
//------------------------------ loop_predication_impl--------------------------
// Insert loop predicates for null checks and range checks
bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
  if (!UseLoopPredicate) return false;

  if (!loop->_head->is_Loop()) {
    // Could be a simple region when irreducible loops are present.
    return false;
  }
  LoopNode* head = loop->_head->as_Loop();

  if (head->unique_ctrl_out()->Opcode() == Op_NeverBranch) {
    // do nothing for infinite loops
    return false;
  }

  if (head->is_OuterStripMinedLoop()) {
    return false;
  }

  // cl is non-NULL only for counted loops eligible for range check
  // elimination (normal loop shape, '<' or '>' exit test).
  CountedLoopNode *cl = NULL;
  if (head->is_valid_counted_loop(T_INT)) {
    cl = head->as_CountedLoop();
    // do nothing for iteration-splitted loops
    if (!cl->is_normal_loop()) return false;
    // Avoid RCE if Counted loop's test is '!='.
    BoolTest::mask bt = cl->loopexit()->test_trip();
    if (bt != BoolTest::lt && bt != BoolTest::gt)
      cl = NULL;
  }

  // Locate the predicate blocks on the entry path: loop limit check,
  // then profile predicates, then regular predicates.
  Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl);
  ProjNode *loop_limit_proj = NULL;
  ProjNode *predicate_proj = NULL;
  ProjNode *profile_predicate_proj = NULL;
  // Loop limit check predicate should be near the loop.
  loop_limit_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
  if (loop_limit_proj != NULL) {
    entry = skip_loop_predicates(loop_limit_proj);
  }
  bool has_profile_predicates = false;
  profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate);
  if (profile_predicate_proj != NULL) {
    Node* n = skip_loop_predicates(entry);
    // Check if predicates were already added to the profile predicate
    // block
    if (n != entry->in(0)->in(0) || n->outcnt() != 1) {
      has_profile_predicates = true;
    }
    entry = n;
  }
  predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);

  float loop_trip_cnt = -1;
  bool follow_branches = loop_predication_should_follow_branches(loop, profile_predicate_proj, loop_trip_cnt);
  assert(!follow_branches || loop_trip_cnt >= 0, "negative trip count?");

  if (predicate_proj == NULL && !follow_branches) {
#ifndef PRODUCT
    if (TraceLoopPredicate) {
      tty->print("missing predicate:");
      loop->dump_head();
      head->dump(1);
    }
#endif
    return false;
  }
  ConNode* zero = _igvn.intcon(0);
  set_ctrl(zero, C->root());

  ResourceArea* area = Thread::current()->resource_area();
  Invariance invar(area, loop);

  // Create list of if-projs such that a newer proj dominates all older
  // projs in the list, and they all dominate loop->tail()
  Node_List if_proj_list;
  Node_List regions;
  Node* current_proj = loop->tail(); // start from tail


  Node_List controls;
  // Walk the idom chain from tail to head, collecting candidate If
  // projections (and, when following branches, regions to explore).
  while (current_proj != head) {
    if (loop == get_loop(current_proj) && // still in the loop ?
        current_proj->is_Proj() &&        // is a projection  ?
        (current_proj->in(0)->Opcode() == Op_If ||
         current_proj->in(0)->Opcode() == Op_RangeCheck)) { // is a if projection ?
      if_proj_list.push(current_proj);
    }
    if (follow_branches &&
        current_proj->Opcode() == Op_Region &&
        loop == get_loop(current_proj)) {
      regions.push(current_proj);
    }
    current_proj = idom(current_proj);
  }

  bool hoisted = false; // true if at least one proj is promoted

  if (!has_profile_predicates) {
    // Regular predication: process candidates from the innermost
    // (closest to tail) outwards.
    while (if_proj_list.size() > 0) {
      Node* n = if_proj_list.pop();

      ProjNode* proj = n->as_Proj();
      IfNode*   iff  = proj->in(0)->as_If();

      CallStaticJavaNode* call = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
      if (call == NULL) {
        if (loop->is_loop_exit(iff)) {
          // stop processing the remaining projs in the list because the execution of them
          // depends on the condition of "iff" (iff->in(1)).
          break;
        } else {
          // Both arms are inside the loop. There are two cases:
          // (1) there is one backward branch. In this case, any remaining proj
          //     in the if_proj list post-dominates "iff". So, the condition of "iff"
          //     does not determine the execution the remining projs directly, and we
          //     can safely continue.
          // (2) both arms are forwarded, i.e. a diamond shape. In this case, "proj"
          //     does not dominate loop->tail(), so it can not be in the if_proj list.
          continue;
        }
      }
      Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(call->uncommon_trap_request());
      if (reason == Deoptimization::Reason_predicate) {
        // Already a predicate: nothing above it can be hoisted.
        break;
      }

      if (predicate_proj != NULL) {
        hoisted = loop_predication_impl_helper(loop, proj, predicate_proj, cl, zero, invar, Deoptimization::Reason_predicate) | hoisted;
      }
    } // end while
  }

  if (follow_branches) {
    PathFrequency pf(loop->_head, this);

    // Some projections were skipped by regular predicates because of
    // an early loop exit. Try them with profile data.
    while (if_proj_list.size() > 0) {
      Node* proj = if_proj_list.pop();
      float f = pf.to(proj);
      if (proj->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
          f * loop_trip_cnt >= 1) {
        hoisted = loop_predication_impl_helper(loop, proj->as_Proj(), profile_predicate_proj, cl, zero, invar, Deoptimization::Reason_profile_predicate) | hoisted;
      }
    }

    // And look into all branches
    Node_Stack stack(0);
    VectorSet seen;
    Node_List if_proj_list_freq(area);
    while (regions.size() > 0) {
      Node* c = regions.pop();
      loop_predication_follow_branches(c, loop, loop_trip_cnt, pf, stack, seen, if_proj_list_freq);
    }

    // Hoist the frequent candidates found in branches as profile predicates.
    for (uint i = 0; i < if_proj_list_freq.size(); i++) {
      ProjNode* proj = if_proj_list_freq.at(i)->as_Proj();
      hoisted = loop_predication_impl_helper(loop, proj, profile_predicate_proj, cl, zero, invar, Deoptimization::Reason_profile_predicate) | hoisted;
    }
  }

#ifndef PRODUCT
  // report that the loop predication has been actually performed
  // for this loop
  if (TraceLoopPredicate && hoisted) {
    tty->print("Loop Predication Performed:");
    loop->dump_head();
  }
#endif

  head->verify_strip_mined(1);

  return hoisted;
}
1444
1445 //------------------------------loop_predication--------------------------------
1446 // driver routine for loop predication optimization
loop_predication(PhaseIdealLoop * phase)1447 bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) {
1448 bool hoisted = false;
1449 // Recursively promote predicates
1450 if (_child) {
1451 hoisted = _child->loop_predication( phase);
1452 }
1453
1454 // self
1455 if (!_irreducible && !tail()->is_top()) {
1456 hoisted |= phase->loop_predication_impl(this);
1457 }
1458
1459 if (_next) { //sibling
1460 hoisted |= _next->loop_predication( phase);
1461 }
1462
1463 return hoisted;
1464 }
1465