/*
 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_LOOPNODE_HPP
#define SHARE_OPTO_LOOPNODE_HPP

#include "opto/cfgnode.hpp"
#include "opto/multnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"

class CmpNode;
class BaseCountedLoopEndNode;
class CountedLoopNode;
class IdealLoopTree;
class LoopNode;
class Node;
class OuterStripMinedLoopEndNode;
class PathFrequency;
class PhaseIdealLoop;
class CountedLoopReserveKit;
class VectorSet;
class Invariance;
struct small_cache;
//
//                  I D E A L I Z E D   L O O P S
//
// Idealized loops are the set of loops I perform more interesting
// transformations on, beyond simple hoisting.

//------------------------------LoopNode---------------------------------------
// Simple loop header.  Fall in path on left, loop-back path on right.
class LoopNode : public RegionNode {
  // Size is bigger to hold the flags.  However, the flags do not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  uint _loop_flags;
  // Names for flag bitfields
  enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
         MainHasNoPreLoop         = 1<<2,
         HasExactTripCount        = 1<<3,
         InnerLoop                = 1<<4,
         PartialPeelLoop          = 1<<5,
         PartialPeelFailed        = 1<<6,
         HasReductions            = 1<<7,
         WasSlpAnalyzed           = 1<<8,
         PassedSlpAnalysis        = 1<<9,
         DoUnrollOnly             = 1<<10,
         VectorizedLoop           = 1<<11,
         HasAtomicPostLoop        = 1<<12,
         HasRangeChecks           = 1<<13,
         IsMultiversioned         = 1<<14,
         StripMined               = 1<<15,
         SubwordLoop              = 1<<16,
         ProfileTripFailed        = 1<<17,
         TransformedLongInnerLoop = 1<<18,
         TransformedLongOuterLoop = 1<<19};
  char _unswitch_count;
  enum { _unswitch_max=3 };
  char _postloop_flags;
  enum { LoopNotRCEChecked = 0, LoopRCEChecked = 1, RCEPostLoop = 2 };

  // Expected trip count from profile data
  float _profile_trip_cnt;

public:
  // Names for edge indices
  enum { Self=0, EntryControl, LoopBackControl };

  bool is_inner_loop() const { return _loop_flags & InnerLoop; }
  void set_inner_loop() { _loop_flags |= InnerLoop; }

  bool range_checks_present() const { return _loop_flags & HasRangeChecks; }
  bool is_multiversioned() const { return _loop_flags & IsMultiversioned; }
  bool is_vectorized_loop() const { return _loop_flags & VectorizedLoop; }
  bool is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
  void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
  bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
  bool is_strip_mined() const { return _loop_flags & StripMined; }
  bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
  bool is_subword_loop() const { return _loop_flags & SubwordLoop; }
  bool is_transformed_long_inner_loop() const { return _loop_flags & TransformedLongInnerLoop; }
  bool is_transformed_long_outer_loop() const { return _loop_flags & TransformedLongOuterLoop; }

  void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
  void mark_has_reductions() { _loop_flags |= HasReductions; }
  void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
  void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
  void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
  void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
  void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
  void mark_has_range_checks() { _loop_flags |= HasRangeChecks; }
  void mark_is_multiversioned() { _loop_flags |= IsMultiversioned; }
  void mark_strip_mined() { _loop_flags |= StripMined; }
  void clear_strip_mined() { _loop_flags &= ~StripMined; }
  void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
  void mark_subword_loop() { _loop_flags |= SubwordLoop; }
  void mark_transformed_long_inner_loop() { _loop_flags |= TransformedLongInnerLoop; }
  void mark_transformed_long_outer_loop() { _loop_flags |= TransformedLongOuterLoop; }

  int unswitch_max() { return _unswitch_max; }
  int unswitch_count() { return _unswitch_count; }

  int has_been_range_checked() const { return _postloop_flags & LoopRCEChecked; }
  void set_has_been_range_checked() { _postloop_flags |= LoopRCEChecked; }
  int is_rce_post_loop() const { return _postloop_flags & RCEPostLoop; }
  void set_is_rce_post_loop() { _postloop_flags |= RCEPostLoop; }

  void set_unswitch_count(int val) {
    assert(val <= unswitch_max(), "too many unswitches");
    _unswitch_count = val;
  }

  void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
  float profile_trip_cnt() { return _profile_trip_cnt; }

  LoopNode(Node *entry, Node *backedge)
    : RegionNode(3), _loop_flags(0), _unswitch_count(0),
      _postloop_flags(0), _profile_trip_cnt(COUNT_UNKNOWN) {
    init_class_id(Class_Loop);
    init_req(EntryControl, entry);
    init_req(LoopBackControl, backedge);
  }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  bool can_be_counted_loop(PhaseTransform* phase) const {
    return req() == 3 && in(0) != NULL &&
      in(1) != NULL && phase->type(in(1)) != Type::TOP &&
      in(2) != NULL && phase->type(in(2)) != Type::TOP;
  }
  bool is_valid_counted_loop(BasicType bt) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

  void verify_strip_mined(int expect_skeleton) const NOT_DEBUG_RETURN;
  virtual LoopNode* skip_strip_mined(int expect_skeleton = 1) { return this; }
  virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return NULL; }
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return NULL; }
  virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return NULL; }
  virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return NULL; }
};

//------------------------------Counted Loops----------------------------------
// Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
// path (and maybe some other exit paths).  The trip-counter exit is always
// last in the loop.  The trip-counter has to stride by a constant;
// the exit value is also loop invariant.

// CountedLoopNodes and CountedLoopEndNodes come in matched pairs.  The
// CountedLoopNode has the incoming loop control and the loop-back-control
// which is always the IfTrue before the matching CountedLoopEndNode.  The
// CountedLoopEndNode has an incoming control (possibly not the
// CountedLoopNode if there is control flow in the loop), the post-increment
// trip-counter value, and the limit.  The trip-counter value is always of
// the form (Op old-trip-counter stride).  The old-trip-counter is produced
// by a Phi connected to the CountedLoopNode.  The stride is constant.
// The Op is any commutative opcode, including Add, Mul, Xor.  The
// CountedLoopEndNode also takes in the loop-invariant limit value.

// From a CountedLoopNode I can reach the matching CountedLoopEndNode via the
// loop-back control.  From CountedLoopEndNodes I can reach CountedLoopNodes
// via the old-trip-counter from the Op node.
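//
// For illustration only (not a normative IR description; names are
// arbitrary), a source loop of the canonical shape
//
//   for (int i = init; i < limit; i += stride) {  // stride is a constant,
//     ...                                         // limit is loop invariant
//   }
//
// is what gets converted into such a matched CountedLoopNode /
// CountedLoopEndNode pair.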

//------------------------------CountedLoopNode--------------------------------
// CountedLoopNodes head simple counted loops.  CountedLoopNodes have as
// inputs the incoming loop-start control and the loop-back control, so they
// act like RegionNodes.  They also take in the initial trip counter, the
// loop-invariant stride and the loop-invariant limit value.  CountedLoopNodes
// produce a loop-body control and the trip counter value.  Since
// CountedLoopNodes behave like RegionNodes I still have a standard CFG model.

class BaseCountedLoopNode : public LoopNode {
public:
  BaseCountedLoopNode(Node *entry, Node *backedge)
    : LoopNode(entry, backedge) {
  }

  Node *init_control() const { return in(EntryControl); }
  Node *back_control() const { return in(LoopBackControl); }

  Node* init_trip() const;
  Node* stride() const;
  bool stride_is_con() const;
  Node* limit() const;
  Node* incr() const;
  Node* phi() const;

  BaseCountedLoopEndNode* loopexit_or_null() const;
  BaseCountedLoopEndNode* loopexit() const;

  virtual BasicType bt() const = 0;
  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return false;
  }

  static BaseCountedLoopNode* make(Node* entry, Node* backedge, BasicType bt);
};


class CountedLoopNode : public BaseCountedLoopNode {
  // Size is bigger to hold _main_idx.  However, _main_idx does not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }

  // For Pre- and Post-loops during debugging ONLY, this holds the index of
  // the Main CountedLoop.  Used to assert that we understand the graph shape.
  node_idx_t _main_idx;

  // Known trip count calculated by compute_exact_trip_count()
  uint _trip_count;

  // Log2 of original loop bodies in unrolled loop
  int _unrolled_count_log2;

  // Node count prior to last unrolling - used to decide if
  // unroll,optimize,unroll,optimize,... is making progress
  int _node_count_before_unroll;

  // If slp analysis is performed we record the maximum
  // vector mapped unroll factor here
  int _slp_maximum_unroll_factor;

public:
  CountedLoopNode(Node *entry, Node *backedge)
    : BaseCountedLoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint),
      _unrolled_count_log2(0), _node_count_before_unroll(0),
      _slp_maximum_unroll_factor(0) {
    init_class_id(Class_CountedLoop);
    // Initialize _trip_count to the largest possible value.
    // Will be reset (lower) if the loop's trip count is known.
  }

  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  CountedLoopEndNode* loopexit_or_null() const { return (CountedLoopEndNode*) BaseCountedLoopNode::loopexit_or_null(); }
  CountedLoopEndNode* loopexit() const { return (CountedLoopEndNode*) BaseCountedLoopNode::loopexit(); }
  int stride_con() const;

  // Match increment with optional truncation
  static Node*
  match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInteger** trunc_type,
                                      BasicType bt);

  // A 'main' loop has a pre-loop and a post-loop.  The 'main' loop
  // can run short a few iterations and may start a few iterations in.
  // It will be RCE'd and unrolled and aligned.

  // A following 'post' loop will run any remaining iterations.  Used
  // during Range Check Elimination, the 'post' loop will do any final
  // iterations with full checks.  Also used by Loop Unrolling, where
  // the 'post' loop will do any epilog iterations needed.  Basically,
  // a 'post' loop can not profitably be further unrolled or RCE'd.

  // A preceding 'pre' loop will run at least 1 iteration (to do peeling),
  // it may do under-flow checks for RCE and may do alignment iterations
  // so the following main loop 'knows' that it is striding down cache
  // lines.

  // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  // Aligned, may be missing its pre-loop.
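  //
  // Illustrative sketch only (the limit names and unroll factor U are
  // invented): for a source loop "for (int i = 0; i < n; i++) body(i);"
  // the pre/main/post nest conceptually executes as
  //
  //   for (i = 0;          i < pre_limit;  i++ ) body(i);  // pre:  full checks
  //   for (/* continue */; i < main_limit; i+=U) body(i);  // main: unrolled by U, RCE'd
  //   for (/* continue */; i < n;          i++ ) body(i);  // post: epilog, full checks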
  bool is_normal_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
  bool is_pre_loop      () const { return (_loop_flags&PreMainPostFlagsMask) == Pre;    }
  bool is_main_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Main;   }
  bool is_post_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Post;   }
  bool is_reduction_loop() const { return (_loop_flags&HasReductions) == HasReductions; }
  bool was_slp_analyzed () const { return (_loop_flags&WasSlpAnalyzed) == WasSlpAnalyzed; }
  bool has_passed_slp   () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
  bool is_unroll_only   () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; }
  bool is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
  bool has_atomic_post_loop() const { return (_loop_flags & HasAtomicPostLoop) == HasAtomicPostLoop; }
  void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }

  int main_idx() const { return _main_idx; }


  void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
  void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main;                         }
  void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
  void set_normal_loop(                    ) { _loop_flags &= ~PreMainPostFlagsMask; }

  void set_trip_count(uint tc) { _trip_count = tc; }
  uint trip_count()            { return _trip_count; }

  bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; }
  void set_exact_trip_count(uint tc) {
    _trip_count = tc;
    _loop_flags |= HasExactTripCount;
  }
  void set_nonexact_trip_count() {
    _loop_flags &= ~HasExactTripCount;
  }
  void set_notpassed_slp() {
    _loop_flags &= ~PassedSlpAnalysis;
  }

  void double_unrolled_count() { _unrolled_count_log2++; }
  int  unrolled_count()        { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }
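  // (The shift amount above is clamped to BitsPerInt-3, i.e. 29 for a 32-bit
  //  int, so the returned count saturates at 1 << 29 instead of overflowing.)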

  void set_node_count_before_unroll(int ct)  { _node_count_before_unroll = ct; }
  int  node_count_before_unroll()            { return _node_count_before_unroll; }
  void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
  int  slp_max_unroll() const                { return _slp_maximum_unroll_factor; }

  virtual LoopNode* skip_strip_mined(int expect_skeleton = 1);
  OuterStripMinedLoopNode* outer_loop() const;
  virtual IfTrueNode* outer_loop_tail() const;
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
  virtual IfFalseNode* outer_loop_exit() const;
  virtual SafePointNode* outer_safepoint() const;

  // If this is a main loop in a pre/main/post loop nest, walk over
  // the predicates that were inserted by
  // duplicate_predicates()/add_range_check_predicate()
  static Node* skip_predicates_from_entry(Node* ctrl);
  Node* skip_predicates();

  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return bt == T_INT;
  }
  virtual BasicType bt() const {
    return T_INT;
  }

  Node* is_canonical_loop_entry();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class LongCountedLoopNode : public BaseCountedLoopNode {
public:
  LongCountedLoopNode(Node *entry, Node *backedge)
    : BaseCountedLoopNode(entry, backedge) {
    init_class_id(Class_LongCountedLoop);
  }

  virtual int Opcode() const;

  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return bt == T_LONG;
  }

  virtual BasicType bt() const {
    return T_LONG;
  }

  LongCountedLoopEndNode* loopexit_or_null() const { return (LongCountedLoopEndNode*) BaseCountedLoopNode::loopexit_or_null(); }
  LongCountedLoopEndNode* loopexit() const { return (LongCountedLoopEndNode*) BaseCountedLoopNode::loopexit(); }
  jlong stride_con() const;
};


//------------------------------CountedLoopEndNode-----------------------------
// CountedLoopEndNodes end simple trip counted loops.  They act much like
// IfNodes.

class BaseCountedLoopEndNode : public IfNode {
public:
  enum { TestControl, TestValue };
  BaseCountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : IfNode(control, test, prob, cnt) {
    init_class_id(Class_BaseCountedLoopEnd);
  }

  Node *cmp_node() const { return (in(TestValue)->req() >= 2) ? in(TestValue)->in(1) : NULL; }
  Node* incr() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; }
  Node* limit() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(2) : NULL; }
  Node* stride() const { Node* tmp = incr(); return (tmp && tmp->req() == 3) ? tmp->in(2) : NULL; }
  Node* init_trip() const { Node* tmp = phi(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; }
  bool stride_is_con() const { Node *tmp = stride(); return (tmp != NULL && tmp->is_Con()); }

  PhiNode* phi() const {
    Node* tmp = incr();
    if (tmp && tmp->req() == 3) {
      Node* phi = tmp->in(1);
      if (phi->is_Phi()) {
        return phi->as_Phi();
      }
    }
    return NULL;
  }

  BaseCountedLoopNode* loopnode() const {
    // The CountedLoopNode that goes with this CountedLoopEndNode may
    // have been optimized out by the IGVN so be cautious with the
    // pattern matching on the graph
    PhiNode* iv_phi = phi();
    if (iv_phi == NULL) {
      return NULL;
    }
    Node* ln = iv_phi->in(0);
    if (!ln->is_BaseCountedLoop() || ln->as_BaseCountedLoop()->loopexit_or_null() != this) {
      return NULL;
    }
    if (!ln->operates_on(bt(), true)) {
      return NULL;
    }
    return ln->as_BaseCountedLoop();
  }

  BoolTest::mask test_trip() const { return in(TestValue)->as_Bool()->_test._test; }
  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return false;
  }

  jlong stride_con() const;
  virtual BasicType bt() const = 0;

  static BaseCountedLoopEndNode* make(Node* control, Node* test, float prob, float cnt, BasicType bt);
};

class CountedLoopEndNode : public BaseCountedLoopEndNode {
public:

  CountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : BaseCountedLoopEndNode(control, test, prob, cnt) {
    init_class_id(Class_CountedLoopEnd);
  }
  virtual int Opcode() const;

  CountedLoopNode* loopnode() const {
    return (CountedLoopNode*) BaseCountedLoopEndNode::loopnode();
  }
  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return bt == T_INT;
  }

  virtual BasicType bt() const {
    return T_INT;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class LongCountedLoopEndNode : public BaseCountedLoopEndNode {
public:
  LongCountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : BaseCountedLoopEndNode(control, test, prob, cnt) {
    init_class_id(Class_LongCountedLoopEnd);
  }

  LongCountedLoopNode* loopnode() const {
    return (LongCountedLoopNode*) BaseCountedLoopEndNode::loopnode();
  }
  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return bt == T_LONG;
  }
  virtual int Opcode() const;

  virtual BasicType bt() const {
    return T_LONG;
  }
};


inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit_or_null() const {
  Node* bctrl = back_control();
  if (bctrl == NULL) return NULL;

  Node* lexit = bctrl->in(0);
  if (!lexit->is_BaseCountedLoopEnd()) {
    return NULL;
  }
  BaseCountedLoopEndNode* result = lexit->as_BaseCountedLoopEnd();
  if (!result->operates_on(bt(), true)) {
    return NULL;
  }
  return result;
}

inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  assert(cle != NULL, "loopexit is NULL");
  return cle;
}

inline Node* BaseCountedLoopNode::init_trip() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->init_trip() : NULL;
}
inline Node* BaseCountedLoopNode::stride() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->stride() : NULL;
}

inline bool BaseCountedLoopNode::stride_is_con() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL && cle->stride_is_con();
}
inline Node* BaseCountedLoopNode::limit() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->limit() : NULL;
}
inline Node* BaseCountedLoopNode::incr() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->incr() : NULL;
}
inline Node* BaseCountedLoopNode::phi() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->phi() : NULL;
}

//------------------------------LoopLimitNode-----------------------------
// Counted Loop limit node which represents exact final iterator value:
// trip_count = (limit - init_trip + stride - 1)/stride
// final_value= trip_count * stride + init_trip.
// Use HW instructions to calculate it when the computation can overflow
// the integer range.
// Note, final_value should fit into integer since counted loop has
// limit check: limit <= max_int-stride.
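//
// Worked example (illustrative numbers only): init_trip = 0, limit = 10,
// stride = 3:
//   trip_count  = (10 - 0 + 3 - 1)/3 = 4
//   final_value = 4*3 + 0 = 12, the first iterator value failing i < 10.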
class LoopLimitNode : public Node {
  enum { Init=1, Limit=2, Stride=3 };
public:
  LoopLimitNode(Compile* C, Node *init, Node *limit, Node *stride) : Node(0,init,limit,stride) {
    // Put it on the Macro nodes list to optimize during macro nodes expansion.
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase);
};

// Support for strip mining
class OuterStripMinedLoopNode : public LoopNode {
private:
  CountedLoopNode* inner_loop() const;
public:
  OuterStripMinedLoopNode(Compile* C, Node *entry, Node *backedge)
    : LoopNode(entry, backedge) {
    init_class_id(Class_OuterStripMinedLoop);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }

  virtual int Opcode() const;

  virtual IfTrueNode* outer_loop_tail() const;
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
  virtual IfFalseNode* outer_loop_exit() const;
  virtual SafePointNode* outer_safepoint() const;
  void adjust_strip_mined_loop(PhaseIterGVN* igvn);
};

class OuterStripMinedLoopEndNode : public IfNode {
public:
  OuterStripMinedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : IfNode(control, test, prob, cnt) {
    init_class_id(Class_OuterStripMinedLoopEnd);
  }

  virtual int Opcode() const;

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  bool is_expanded(PhaseGVN *phase) const;
};

// -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj {
public:
  IdealLoopTree *_parent;  // Parent in loop tree
  IdealLoopTree *_next;    // Next sibling in loop tree
  IdealLoopTree *_child;   // First child in loop tree

  // The head-tail backedge defines the loop.
  // If a loop has multiple backedges, this is addressed during cleanup where
  // we peel off the multiple backedges, merging all edges at the bottom and
  // ensuring that one proper backedge flows into the loop.
  Node *_head;  // Head of loop
  Node *_tail;  // Tail of loop
  inline Node *tail();  // Handle lazy update of _tail field
  inline Node *head();  // Handle lazy update of _head field
  PhaseIdealLoop* _phase;
  int _local_loop_unroll_limit;
  int _local_loop_unroll_factor;

  Node_List _body;  // Loop body for inner loops

  uint16_t _nest;   // Nesting depth
  uint8_t _irreducible:1,   // True if irreducible
          _has_call:1,      // True if has call safepoint
          _has_sfpt:1,      // True if has non-call safepoint
          _rce_candidate:1; // True if candidate for range check elimination

  Node_List* _safepts;         // List of safepoints in this loop
  Node_List* _required_safept; // An inner loop cannot delete these safepts;
  bool _allow_optimizations;   // Allow loop optimizations

  IdealLoopTree(PhaseIdealLoop* phase, Node *head, Node *tail)
    : _parent(0), _next(0), _child(0),
      _head(head), _tail(tail),
      _phase(phase),
      _local_loop_unroll_limit(0), _local_loop_unroll_factor(0),
      _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0),
      _safepts(NULL),
      _required_safept(NULL),
      _allow_optimizations(true)
  {
    precond(_head != NULL);
    precond(_tail != NULL);
  }

  // Is 'l' a member of 'this'?
  bool is_member(const IdealLoopTree *l) const; // Test for nested membership

  // Set loop nesting depth.  Accumulate has_call bits.
  int set_nest( uint depth );

  // Split out multiple fall-in edges from the loop header.  Move them to a
  // private RegionNode before the loop.  This becomes the loop landing pad.
  void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );

  // Split out the outermost loop from this shared header.
  void split_outer_loop( PhaseIdealLoop *phase );

  // Merge all the backedges from the shared header into a private Region.
  // Feed that region as the one backedge to this loop.
  void merge_many_backedges( PhaseIdealLoop *phase );

  // Split shared headers and insert loop landing pads.
  // Insert a LoopNode to replace the RegionNode.
  // Returns TRUE if loop tree is structurally changed.
  bool beautify_loops( PhaseIdealLoop *phase );

  // Perform optimization to use the loop predicates for null checks and range checks.
  // Applies to any loop level (not just the innermost one)
  bool loop_predication( PhaseIdealLoop *phase);

  // Perform iteration-splitting on inner loops.  Split iterations to
  // avoid range checks or one-shot null checks.  Returns false if the
  // current round of loop opts should stop.
  bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new );

  // Driver for various flavors of iteration splitting.  Returns false
  // if the current round of loop opts should stop.
  bool iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );

  // Given dominators, try to find loops with calls that must always be
  // executed (call dominates loop tail).  These loops do not need non-call
  // safepoints (ncsfpt).
  void check_safepts(VectorSet &visited, Node_List &stack);

  // Allpaths backwards scan from loop tail, terminating each path at first safepoint
  // encountered.
  void allpaths_check_safepts(VectorSet &visited, Node_List &stack);

  // Remove safepoints from loop.  Optionally keeping one.
  void remove_safepoints(PhaseIdealLoop* phase, bool keep_one);

  // Convert to counted loops where possible
  void counted_loop( PhaseIdealLoop *phase );

  // Check for Node being a loop-breaking test
  Node *is_loop_exit(Node *iff) const;

  // Remove simplistic dead code from loop body
  void DCE_loop_body();

  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
  // Replace with a 1-in-10 exit guess.
  void adjust_loop_exit_prob( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
  // Useful for unrolling loops with NO array accesses.
  bool policy_peel_only( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unswitched -- clone
  // loop with an invariant test
  bool policy_unswitching( PhaseIdealLoop *phase ) const;

  // Micro-benchmark spamming.  Remove empty loops.
  bool do_remove_empty_loop( PhaseIdealLoop *phase );

  // Convert one iteration loop into normal code.
  bool do_one_iteration_loop( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
  // move some loop-invariant test (usually a null-check) before the loop.
  bool policy_peeling(PhaseIdealLoop *phase);

  uint estimate_peeling(PhaseIdealLoop *phase);

  // Return TRUE or FALSE if the loop should be maximally unrolled.  Stash any
  // known trip count in the counted loop node.
  bool policy_maximally_unroll(PhaseIdealLoop *phase) const;

  // Return TRUE or FALSE if the loop should be unrolled or not.  Apply unroll
  // if the loop is a counted loop and the loop body is small enough.
  bool policy_unroll(PhaseIdealLoop *phase);

  // Loop analyses to map to a maximal superword unrolling for vectorization.
  void policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct);

  // Return TRUE or FALSE if the loop should be range-check-eliminated.
  // Gather a list of IF tests that are dominated by iteration splitting;
  // also gather the end of the first split and the start of the 2nd split.
  bool policy_range_check( PhaseIdealLoop *phase ) const;

  // Return TRUE if "iff" is a range check.
  bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar DEBUG_ONLY(COMMA ProjNode *predicate_proj)) const;

  // Estimate the number of nodes required when cloning a loop (body).
  uint est_loop_clone_sz(uint factor) const;
  // Estimate the number of nodes required when unrolling a loop (body).
  uint est_loop_unroll_sz(uint factor) const;

  // Compute loop trip count if possible
  void compute_trip_count(PhaseIdealLoop* phase);

  // Compute loop trip count from profile data
  float compute_profile_trip_cnt_helper(Node* n);
  void compute_profile_trip_cnt( PhaseIdealLoop *phase );

  // Reassociate invariant expressions.
  void reassociate_invariants(PhaseIdealLoop *phase);
  // Reassociate invariant binary expressions.
  Node* reassociate(Node* n1, PhaseIdealLoop *phase);
  // Reassociate invariant add and subtract expressions.
  Node* reassociate_add_sub(Node* n1, int inv1_idx, int inv2_idx, PhaseIdealLoop *phase);
  // Return nonzero index of invariant operand if invariant and variant
  // are combined with an associative binary.  Helper for reassociate_invariants.
  int find_invariant(Node* n, PhaseIdealLoop *phase);
  // Return TRUE if "n" is associative.
  bool is_associative(Node* n, Node* base=NULL);

  // Return true if n is invariant
  bool is_invariant(Node* n) const;

  // Put loop body on igvn work list
  void record_for_igvn();

  bool is_root() { return _parent == NULL; }
  // A proper/reducible loop w/o any (occasional) dead back-edge.
  bool is_loop()      { return !_irreducible && !tail()->is_top(); }
  bool is_counted()   { return is_loop() && _head->is_CountedLoop(); }
  bool is_innermost() { return is_loop() && _child == NULL; }

  void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);

#ifndef PRODUCT
  void dump_head() const; // Dump loop head only
  void dump() const;      // Dump this loop recursively
  void verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const;
#endif

private:
  enum { EMPTY_LOOP_SIZE = 7 }; // Number of nodes in an empty loop.

  // Estimate the number of nodes resulting from control and data flow merge.
  uint est_loop_flow_merge_sz() const;
};

// -----------------------------PhaseIdealLoop---------------------------------
// Computes the mapping from Nodes to IdealLoopTrees.  Organizes IdealLoopTrees
// into a loop tree.  Drives the loop-based transformations on the ideal graph.
class PhaseIdealLoop : public PhaseTransform {
  friend class IdealLoopTree;
  friend class SuperWord;
  friend class CountedLoopReserveKit;
  friend class ShenandoahBarrierC2Support;
  friend class AutoNodeBudget;

  // Pre-computed def-use info
  PhaseIterGVN &_igvn;

  // Head of loop tree
  IdealLoopTree* _ltree_root;

  // Array of pre-order numbers, plus post-visited bit.
  // ZERO for not pre-visited.  EVEN for pre-visited but not post-visited.
  // ODD for post-visited.  Other bits are the pre-order number.
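  // (Illustration: a node given pre-order number 5 is stored as 10, i.e.
  //  5 << 1, while pre-visited, and becomes 11 once the low bit is set on
  //  post-visit; see set_preorder_visited() and set_postvisited() below.)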
  uint *_preorders;
  uint _max_preorder;

  const PhaseIdealLoop* _verify_me;
  bool _verify_only;

  // Allocate _preorders[] array
  void allocate_preorders() {
    _max_preorder = C->unique()+8;
    _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder);
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Allocate _preorders[] array
  void reallocate_preorders() {
    if ( _max_preorder < C->unique() ) {
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, C->unique());
      _max_preorder = C->unique();
    }
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Check to grow _preorders[] array for the case when build_loop_tree_impl()
  // adds new nodes.
  void check_grow_preorders( ) {
    if ( _max_preorder < C->unique() ) {
      uint newsize = _max_preorder<<1;  // double size of array
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, newsize);
      memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder));
      _max_preorder = newsize;
    }
  }
  // Check for pre-visited.  Zero for NOT visited; non-zero for visited.
  int is_visited( Node *n ) const { return _preorders[n->_idx]; }
  // Pre-order numbers are written to the Nodes array as low-bit-set values.
  void set_preorder_visited( Node *n, int pre_order ) {
    assert( !is_visited( n ), "already set" );
    _preorders[n->_idx] = (pre_order<<1);
  };
  // Return pre-order number.
  int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }

  // Check for being post-visited.
  // Should be previsited already (checked with assert(is_visited(n))).
  int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }

  // Mark as post visited
  void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; }

public:
  // Set/get control node out.  Set lower bit to distinguish from IdealLoopTree
  // Returns true if "n" is a data node, false if it's a control node.
  bool has_ctrl( Node *n ) const { return ((intptr_t)_nodes[n->_idx]) & 1; }
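  // (In other words, _nodes[n->_idx] holds either a tagged control pointer,
  //  (Node*)ctrl + 1, for data nodes, or an untagged IdealLoopTree* for
  //  control nodes; see set_ctrl() and set_loop() below.)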

private:
  // clear out dead code after build_loop_late
  Node_List _deadlist;

  // Support for faster execution of get_late_ctrl()/dom_lca()
  // when a node has many uses and dominator depth is deep.
  GrowableArray<jlong> _dom_lca_tags;
  uint _dom_lca_tags_round;
  void init_dom_lca_tags();

  // Helper for debugging bad dominance relationships
  bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early);

  Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false);

  // Inline wrapper for frequent cases:
  // 1) only one use
  // 2) a use is the same as the current LCA passed as 'n1'
  Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
    assert( n->is_CFG(), "" );
    // Fast-path NULL lca
    if( lca != NULL && lca != n ) {
      assert( lca->is_CFG(), "" );
      // find LCA of all uses
      n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
    }
    return find_non_split_ctrl(n);
  }
  Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );

  // Helper function for directing control inputs away from CFG split points.
  Node *find_non_split_ctrl( Node *ctrl ) const {
    if (ctrl != NULL) {
      if (ctrl->is_MultiBranch()) {
        ctrl = ctrl->in(0);
      }
      assert(ctrl->is_CFG(), "CFG");
    }
    return ctrl;
  }

  Node* cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);

#ifdef ASSERT
  void ensure_zero_trip_guard_proj(Node* node, bool is_main_loop);
#endif
  void copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* init, Node* stride, IdealLoopTree* outer_loop, LoopNode* outer_main_head,
                                                    uint dd_main_head, const uint idx_before_pre_post, const uint idx_after_post_before_pre,
                                                    Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post, const Node_List &old_new);
  void copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* init, Node* stride, IdealLoopTree* outer_loop, LoopNode* outer_main_head,
                                             uint dd_main_head, const uint idx_before_pre_post, const uint idx_after_post_before_pre,
                                             Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post, const Node_List &old_new);
  Node* clone_skeleton_predicate_for_main_loop(Node* iff, Node* new_init, Node* new_stride, Node* predicate, Node* uncommon_proj, Node* control,
                                               IdealLoopTree* outer_loop, Node* input_proj);
  Node* clone_skeleton_predicate_bool(Node* iff, Node* new_init, Node* new_stride, Node* predicate, Node* uncommon_proj, Node* control,
                                      IdealLoopTree* outer_loop);
  static bool skeleton_predicate_has_opaque(IfNode* iff);
  static void get_skeleton_predicates(Node* predicate, Unique_Node_List& list, bool get_opaque = false);
  void update_main_loop_skeleton_predicates(Node* ctrl, CountedLoopNode* loop_head, Node* init, int stride_con);
  void insert_loop_limit_check(ProjNode* limit_check_proj, Node* cmp_limit, Node* bol);
#ifdef ASSERT
  bool only_has_infinite_loops();
#endif

  void log_loop_tree();

public:

  PhaseIterGVN &igvn() const { return _igvn; }

  bool has_node( Node* n ) const {
    guarantee(n != NULL, "No Node.");
    return _nodes[n->_idx] != NULL;
  }
  // check if transform created new nodes that need _ctrl recorded
  Node *get_late_ctrl( Node *n, Node *early );
  Node *get_early_ctrl( Node *n );
  Node *get_early_ctrl_for_expensive(Node *n, Node* earliest);
  void set_early_ctrl(Node* n, bool update_body);
  void set_subtree_ctrl(Node* n, bool update_body);
  void set_ctrl( Node *n, Node *ctrl ) {
    assert( !has_node(n) || has_ctrl(n), "" );
    assert( ctrl->in(0), "cannot set dead control node" );
    assert( ctrl == find_non_split_ctrl(ctrl), "must set legal ctrl" );
    _nodes.map( n->_idx, (Node*)((intptr_t)ctrl + 1) );
  }
  // Set control and update loop membership
  void set_ctrl_and_loop(Node* n, Node* ctrl) {
    IdealLoopTree* old_loop = get_loop(get_ctrl(n));
    IdealLoopTree* new_loop = get_loop(ctrl);
    if (old_loop != new_loop) {
      if (old_loop->_child == NULL) old_loop->_body.yank(n);
      if (new_loop->_child == NULL) new_loop->_body.push(n);
    }
    set_ctrl(n, ctrl);
  }
  // Control nodes can be replaced or subsumed.  During this pass they
  // get their replacement Node in slot 1.  Instead of updating the block
  // location of all Nodes in the subsumed block, we lazily do it.  As we
  // pull such a subsumed block out of the array, we write back the final
  // correct block.
  Node *get_ctrl( Node *i ) {

    assert(has_node(i), "");
    Node *n = get_ctrl_no_update(i);
    _nodes.map( i->_idx, (Node*)((intptr_t)n + 1) );
    assert(has_node(i) && has_ctrl(i), "");
    assert(n == find_non_split_ctrl(n), "must return legal ctrl" );
    return n;
  }
  // true if CFG node d dominates CFG node n
  bool is_dominator(Node *d, Node *n);
  // return get_ctrl for a data node and self(n) for a CFG node
  Node* ctrl_or_self(Node* n) {
    if (has_ctrl(n))
      return get_ctrl(n);
    else {
      assert (n->is_CFG(), "must be a CFG node");
      return n;
    }
  }

  Node *get_ctrl_no_update_helper(Node *i) const {
    assert(has_ctrl(i), "should be control, not loop");
    return (Node*)(((intptr_t)_nodes[i->_idx]) & ~1);
  }

  Node *get_ctrl_no_update(Node *i) const {
    assert( has_ctrl(i), "" );
    Node *n = get_ctrl_no_update_helper(i);
    if (!n->in(0)) {
      // Skip dead CFG nodes
      do {
        n = get_ctrl_no_update_helper(n);
      } while (!n->in(0));
      n = find_non_split_ctrl(n);
    }
    return n;
  }

  // Check for loop being set
  // "n" must be a control node.  Returns true if "n" is known to be in a loop.
  bool has_loop( Node *n ) const {
    assert(!has_node(n) || !has_ctrl(n), "");
    return has_node(n);
  }
  // Set loop
  void set_loop( Node *n, IdealLoopTree *loop ) {
    _nodes.map(n->_idx, (Node*)loop);
  }
  // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms.  Replace
  // the 'old_node' with 'new_node'.  Kill old-node.  Add a reference
  // from old_node to new_node to support the lazy update.  Reference
  // replaces loop reference, since that is not needed for dead node.
  void lazy_update(Node *old_node, Node *new_node) {
    assert(old_node != new_node, "no cycles please");
    // Re-use the side array slot for this node to provide the
    // forwarding pointer.
    _nodes.map(old_node->_idx, (Node*)((intptr_t)new_node + 1));
  }
  void lazy_replace(Node *old_node, Node *new_node) {
    _igvn.replace_node(old_node, new_node);
    lazy_update(old_node, new_node);
  }

private:

  // Place 'n' in some loop nest, where 'n' is a CFG node
  void build_loop_tree();
  int build_loop_tree_impl( Node *n, int pre_order );
  // Insert loop into the existing loop tree.  'innermost' is a leaf of the
  // loop tree, not the root.
  IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );

  // Place Data nodes in some loop nest
  void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late_post_work(Node* n, bool pinned);
  void build_loop_late_post(Node* n);
  void verify_strip_mined_scheduling(Node *n, Node* least);

  // Array of immediate dominance info for each CFG node indexed by node idx
private:
  uint _idom_size;
  Node **_idom;                  // Array of immediate dominators
  uint *_dom_depth;              // Used for fast LCA test
  GrowableArray<uint>* _dom_stk; // For recomputation of dom depth

  // build the loop tree and perform any requested optimizations
  void build_and_optimize(LoopOptsMode mode);

  // Dominators for the sea of nodes
  void Dominators();

  // Compute the Ideal Node to Loop mapping
  PhaseIdealLoop(PhaseIterGVN& igvn, LoopOptsMode mode) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _verify_me(nullptr),
    _verify_only(false),
    _nodes_required(UINT_MAX) {
    assert(mode != LoopOptsVerify, "wrong constructor to verify IdealLoop");
    build_and_optimize(mode);
  }

#ifndef PRODUCT
  // Verify that verify_me made the same decisions as a fresh run
  // or only verify that the graph is valid if verify_me is null.
  PhaseIdealLoop(PhaseIterGVN& igvn, const PhaseIdealLoop* verify_me = nullptr) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _verify_me(verify_me),
    _verify_only(verify_me == nullptr),
    _nodes_required(UINT_MAX) {
    build_and_optimize(LoopOptsVerify);
  }
#endif

public:
  Node* idom_no_update(Node* d) const {
    return idom_no_update(d->_idx);
  }

  Node* idom_no_update(uint didx) const {
    assert(didx < _idom_size, "oob");
    Node* n = _idom[didx];
    assert(n != NULL,"Bad immediate dominator info.");
    while (n->in(0) == NULL) { // Skip dead CFG nodes
      n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
      assert(n != NULL,"Bad immediate dominator info.");
    }
    return n;
  }

  Node *idom(Node* d) const {
    return idom(d->_idx);
  }

  Node *idom(uint didx) const {
    Node *n = idom_no_update(didx);
    _idom[didx] = n; // Lazily remove dead CFG nodes from table.
    return n;
  }

  uint dom_depth(Node* d) const {
    guarantee(d != NULL, "Null dominator info.");
    guarantee(d->_idx < _idom_size, "");
    return _dom_depth[d->_idx];
  }
  void set_idom(Node* d, Node* n, uint dom_depth);
  // Locally compute IDOM using dom_lca call
  Node *compute_idom( Node *region ) const;
  // Recompute dom_depth
  void recompute_dom_depth();

  // Is safept not required by an outer loop?
  bool is_deleteable_safept(Node* sfpt);

  // Replace parallel induction variable (parallel to trip counter)
  void replace_parallel_iv(IdealLoopTree *loop);

  Node *dom_lca( Node *n1, Node *n2 ) const {
    return find_non_split_ctrl(dom_lca_internal(n1, n2));
  }
  Node *dom_lca_internal( Node *n1, Node *n2 ) const;

  // Build and verify the loop tree without modifying the graph.  This
  // is useful to verify that all inputs properly dominate their uses.
  static void verify(PhaseIterGVN& igvn) {
#ifdef ASSERT
    ResourceMark rm;
    Compile::TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
    PhaseIdealLoop v(igvn);
#endif
  }

  // Recommended way to use PhaseIdealLoop.
  // Run PhaseIdealLoop in some mode and allocate a local scope for memory allocations.
  static void optimize(PhaseIterGVN &igvn, LoopOptsMode mode) {
    ResourceMark rm;
    PhaseIdealLoop v(igvn, mode);

    Compile* C = Compile::current();
    if (!C->failing()) {
      // Cleanup any modified bits
      igvn.optimize();

      v.log_loop_tree();
    }
  }

  // True if the method has at least 1 irreducible loop
  bool _has_irreducible_loops;

  // Per-Node transform
  virtual Node* transform(Node* n) { return NULL; }

  Node* loop_exit_control(Node* x, IdealLoopTree* loop);
  Node* loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob);
  Node* loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr);
  Node* loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xphi);
  PhiNode* loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealLoopTree* loop);

  bool is_counted_loop(Node* x, IdealLoopTree*& loop, BasicType iv_bt);

  void long_loop_replace_long_iv(Node* iv_to_replace, Node* inner_iv, Node* outer_phi, Node* inner_head);
  bool transform_long_counted_loop(IdealLoopTree* loop, Node_List &old_new);
#ifdef ASSERT
  bool convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* loop);
#endif
  void add_empty_predicate(Deoptimization::DeoptReason reason, Node* inner_head, IdealLoopTree* loop, SafePointNode* sfpt);
  SafePointNode* find_safepoint(Node* back_control, Node* x, IdealLoopTree* loop);
  IdealLoopTree* insert_outer_loop(IdealLoopTree* loop, LoopNode* outer_l, Node* outer_ift);
  IdealLoopTree* create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
                                               IdealLoopTree* loop, float cl_prob, float le_fcnt,
                                               Node*& entry_control, Node*& iffalse);

  Node* exact_limit( IdealLoopTree *loop );

  // Return a post-walked LoopNode
  IdealLoopTree *get_loop( Node *n ) const {
    // Dead nodes have no loop, so return the top level loop instead
    if (!has_node(n)) return _ltree_root;
    assert(!has_ctrl(n), "");
    return (IdealLoopTree*)_nodes[n->_idx];
  }

  IdealLoopTree* ltree_root() const { return _ltree_root; }

  // Is 'n' a (nested) member of 'loop'?
  int is_member( const IdealLoopTree *loop, Node *n ) const {
    return loop->is_member(get_loop(n)); }

  // This is the basic building block of the loop optimizations.  It clones an
  // entire loop body.  It makes an old_new loop body mapping; with this
  // mapping you can find the new-loop equivalent to an old-loop node.  All
  // new-loop nodes are exactly equal to their old-loop counterparts, all
  // edges are the same.  All exits from the old-loop now have a RegionNode
  // that merges the equivalent new-loop path.  This is true even for the
  // normal "loop-exit" condition.  All uses of loop-invariant old-loop values
  // now come from (one or more) Phis that merge their new-loop equivalents.
  // Parameter side_by_side_idom:
  //   When side_by_side_idom is NULL, the dominator tree is constructed for
  //     the clone loop to dominate the original.  Used in construction of
  //     pre-main-post loop sequence.
  //   When nonnull, the clone and original are side-by-side, both are
  //     dominated by the passed in side_by_side_idom node.  Used in
  //     construction of unswitched loops.
1214 enum CloneLoopMode {
1215 IgnoreStripMined = 0, // Only clone inner strip mined loop
1216 CloneIncludesStripMined = 1, // clone both inner and outer strip mined loops
1217 ControlAroundStripMined = 2 // Only clone inner strip mined loop,
1218 // result control flow branches
1219 // either to inner clone or outer
1220 // strip mined loop.
1221 };
1222 void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
1223 CloneLoopMode mode, Node* side_by_side_idom = NULL);
1224 void clone_loop_handle_data_uses(Node* old, Node_List &old_new,
1225 IdealLoopTree* loop, IdealLoopTree* companion_loop,
1226 Node_List*& split_if_set, Node_List*& split_bool_set,
1227 Node_List*& split_cex_set, Node_List& worklist,
1228 uint new_counter, CloneLoopMode mode);
1229 void clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
1230 IdealLoopTree* outer_loop, int dd, Node_List &old_new,
1231 Node_List& extra_data_nodes);
1232
1233 // If we got the effect of peeling, either by actually peeling or by
1234 // making a pre-loop which must execute at least once, we can remove
1235 // all loop-invariant dominated tests in the main body.
1236 void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new );
1237
1238 // Generate code to do a loop peel for the given loop (and body).
1239 // old_new is a temp array.
1240 void do_peeling( IdealLoopTree *loop, Node_List &old_new );
1241
1242 // Add pre and post loops around the given loop. These loops are used
1243 // during RCE, unrolling and aligning loops.
1244 void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );

  // Add post loop after the given loop.
  Node *insert_post_loop(IdealLoopTree *loop, Node_List &old_new,
                         CountedLoopNode *main_head, CountedLoopEndNode *main_end,
                         Node *incr, Node *limit, CountedLoopNode *&post_head);

  // Add an RCE'd post loop which we will multi-version and adapt for run-time test path usage
  void insert_scalar_rced_post_loop( IdealLoopTree *loop, Node_List &old_new );

  // Add a vector post loop between a vector main loop and the current post loop
  void insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new);
  // If Node n lives in the back_ctrl block, we clone a private version of n
  // in preheader_ctrl block and return that, otherwise return n.
  Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones );

  // Take steps to maximally unroll the loop. Peel any odd iterations, then
  // unroll to do double iterations. The next round of major loop transforms
  // will repeat until the doubled loop body does all remaining iterations in 1
  // pass.
  void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );

  // Unroll the loop body one step - make each trip do 2 iterations.
  void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );

  // Mark vector reduction candidates before loop unrolling
  void mark_reductions( IdealLoopTree *loop );

  // Return true if exp is a constant times an induction var
  bool is_scaled_iv(Node* exp, Node* iv, int* p_scale);

  // Return true if exp is a scaled induction var plus (or minus) constant
  bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
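
  // For example (a sketch): with induction variable 'i', the expression
  // 4*i + 8 satisfies is_scaled_iv_plus_offset(exp, i, &scale, &offset),
  // yielding scale == 4 and *p_offset pointing at the node producing 8;
  // a bare 'i' is a scaled iv with scale == 1.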

  // Enum to determine the action to be performed in create_new_if_for_predicate() when processing phis of UCT regions.
  enum class UnswitchingAction {
    None,            // No special action.
    FastLoopCloning, // Need to clone nodes for the fast loop.
    SlowLoopRewiring // Need to rewire nodes for the slow loop.
  };

  // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
  ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, Deoptimization::DeoptReason reason,
                                        int opcode, bool if_cont_is_true_proj = true, Node_List* old_new = NULL,
                                        UnswitchingAction unswitching_action = UnswitchingAction::None);

  // Clone data nodes for the fast loop while creating a new If with create_new_if_for_predicate.
  Node* clone_data_nodes_for_fast_loop(Node* phi_input, ProjNode* uncommon_proj, Node* if_uct, Node_List* old_new);

  void register_control(Node* n, IdealLoopTree *loop, Node* pred, bool update_body = true);

  static Node* skip_all_loop_predicates(Node* entry);
  static Node* skip_loop_predicates(Node* entry);

  // Find a good location to insert a predicate
  static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason);
  // Find a predicate
  static Node* find_predicate(Node* entry);
  // Construct a range check for a predicate if
  BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl,
                         int scale, Node* offset,
                         Node* init, Node* limit, jint stride,
                         Node* range, bool upper, bool &overflow);
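
  // The returned Bool guards (a sketch; the exact node shape depends on the
  // signs of scale/stride and on the overflow case reported via 'overflow'):
  //   upper == true:  scale*i + offset <u range over the whole trip range
  //   upper == false: scale*i + offset >= 0, folded into an unsigned compare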

  // Implementation of the loop predication to promote checks outside the loop
  bool loop_predication_impl(IdealLoopTree *loop);
  bool loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* proj, ProjNode *predicate_proj,
                                    CountedLoopNode *cl, ConNode* zero, Invariance& invar,
                                    Deoptimization::DeoptReason reason);
  bool loop_predication_should_follow_branches(IdealLoopTree *loop, ProjNode *predicate_proj, float& loop_trip_cnt);
  void loop_predication_follow_branches(Node *c, IdealLoopTree *loop, float loop_trip_cnt,
                                        PathFrequency& pf, Node_Stack& stack, VectorSet& seen,
                                        Node_List& if_proj_list);
  ProjNode* insert_initial_skeleton_predicate(IfNode* iff, IdealLoopTree *loop,
                                              ProjNode* proj, ProjNode *predicate_proj,
                                              ProjNode* upper_bound_proj,
                                              int scale, Node* offset,
                                              Node* init, Node* limit, jint stride,
                                              Node* rng, bool& overflow,
                                              Deoptimization::DeoptReason reason);
  Node* add_range_check_predicate(IdealLoopTree* loop, CountedLoopNode* cl,
                                  Node* predicate_proj, int scale_con, Node* offset,
                                  Node* limit, jint stride_con, Node* value);

  // Helper function to collect predicates for eliminating the useless ones
  void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
  void eliminate_useless_predicates();

  // Change the control input of expensive nodes to allow commoning by
  // IGVN when it is guaranteed to not result in a more frequent
  // execution of the expensive node. Return true if progress.
  bool process_expensive_nodes();

  // Check whether node has become unreachable
  bool is_node_unreachable(Node *n) const {
    return !has_node(n) || n->is_unreachable(_igvn);
  }

  // Eliminate range-checks and other trip-counter vs loop-invariant tests.
  int do_range_check( IdealLoopTree *loop, Node_List &old_new );

  // Check to see if do_range_check(...) cleaned the main loop of range-checks
  void has_range_checks(IdealLoopTree *loop);

  // Process post loops which have range checks and try to build a multi-version
  // guard to safely determine if we can execute the post loop which was RCE'd.
  bool multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoopTree *legacy_loop);

  // Cause the RCE'd post loop to be optimized away; this happens if we cannot
  // complete multiversioning.
  void poison_rce_post_loop(IdealLoopTree *rce_loop);

  // Create a slow version of the loop by cloning the loop
  // and inserting an if to select fast-slow versions.
  // Return the inserted if.
  IfNode* create_slow_version_of_loop(IdealLoopTree *loop,
                                      Node_List &old_new,
                                      IfNode* unswitch_iff,
                                      CloneLoopMode mode);

  // Clone a loop and return the clone head (clone_loop_head).
  // Added nodes include int(1), int(0) - disconnected, If, IfTrue, IfFalse.
  // This routine was created for usage in CountedLoopReserveKit.
  //
  //    int(1) -> If -> IfTrue -> original_loop_head
  //              |
  //              V
  //           IfFalse -> clone_loop_head (returned by this function)
  //
  LoopNode* create_reserve_version_of_loop(IdealLoopTree *loop, CountedLoopReserveKit* lk);
  // Clone loop with an invariant test (that does not exit) and
  // insert a clone of the test that selects which version to
  // execute.
  void do_unswitching (IdealLoopTree *loop, Node_List &old_new);

  // Find candidate "if" for unswitching
  IfNode* find_unswitching_candidate(const IdealLoopTree *loop) const;

  // Range Check Elimination uses this function!
  // Constrain the main loop iterations so the affine function:
  //    low_limit <= scale_con * I + offset < upper_limit
  // always holds true. That is, either increase the number of iterations in
  // the pre-loop or the post-loop until the condition holds true in the main
  // loop. Scale_con, offset and limit are all loop invariant.
  void add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit);
  // Helper function for add_constraint().
  Node* adjust_limit(bool reduce, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round);
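
  // A worked example (a sketch): for a stride-1 loop with the range check
  //    0 <= 1*i + 0 < a.length
  // add_constraint() raises *pre_limit so the pre-loop runs until i >= 0
  // holds, and lowers *main_limit so i < a.length holds throughout the main
  // loop; iterations trimmed off the main loop fall to the post-loop.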

  // Partially peel loop up through last_peel node.
  bool partial_peel( IdealLoopTree *loop, Node_List &old_new );

  // Create a scheduled list of nodes control dependent on ctrl set.
  void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
  // Has a use in the vector set
  bool has_use_in_set( Node* n, VectorSet& vset );
  // Has use internal to the vector set (i.e. not in a phi at the loop head)
  bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
  // clone "n" for uses that are outside of loop
  int clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
  // clone "n" for special uses that are in the not_peeled region
  void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                          VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
  // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
  void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
#ifdef ASSERT
  // Validate the loop partition sets: peel and not_peel
  bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );
  // Ensure that uses outside of loop are of the right form
  bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                 uint orig_exit_idx, uint clone_exit_idx);
  bool is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx);
#endif

  // Returns the nonzero constant stride if the if-node is a possible iv test (otherwise returns zero.)
  int stride_of_possible_iv( Node* iff );
  bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; }
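
  // For example (a sketch): an If testing CmpI(phi, limit) where
  // phi = Phi(init, AddI(phi, ConI(4))) is a possible iv test, and
  // stride_of_possible_iv() would report a stride of 4.
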
  // Return the (unique) control output node that's in the loop (if it exists).
  Node* stay_in_loop( Node* n, IdealLoopTree *loop);
  // Insert a signed compare loop exit cloned from an unsigned compare.
  IfNode* insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop);
  void remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop);
  // Utility to register node "n" with PhaseIdealLoop
  void register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth);
  // Utility to create an if-projection
  ProjNode* proj_clone(ProjNode* p, IfNode* iff);
  // Force the iff control output to be the live_proj
  Node* short_circuit_if(IfNode* iff, ProjNode* live_proj);
  // Insert a region before an if projection
  RegionNode* insert_region_before_proj(ProjNode* proj);
  // Insert a new if before an if projection
  ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj);

  // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
  // "Nearly" because all Nodes have been cloned from the original in the loop,
  // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
  // through the Phi recursively, and return a Bool.
  Node *clone_iff( PhiNode *phi, IdealLoopTree *loop );
  CmpNode *clone_bool( PhiNode *phi, IdealLoopTree *loop );


  // Rework addressing expressions to get the most loop-invariant stuff
  // moved out. We'd like to do all associative operators, but it's especially
  // important (common) to do address expressions.
  Node *remix_address_expressions( Node *n );

  // Convert add to muladd to generate MuladdS2I under certain criteria
  Node * convert_add_to_muladd(Node * n);

  // Attempt to use a conditional move instead of a phi/branch
  Node *conditional_move( Node *n );
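
  // For example (a sketch): a diamond computing
  //    x = (a < b) ? c : d;
  // may be flattened into
  //    x = CMoveI(Bool(CmpI(a, b), lt), d, c);
  // when the test is cheap enough and the profile does not favor one side.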

  // Reorganize offset computations to lower register pressure.
  // Mostly prevent loop-fallout uses of the pre-incremented trip counter
  // (which are then alive with the post-incremented trip counter
  // forcing an extra register move)
  void reorg_offsets( IdealLoopTree *loop );

  // Check for aggressive application of 'split-if' optimization,
  // using basic block level info.
  void  split_if_with_blocks     ( VectorSet &visited, Node_Stack &nstack);
  Node *split_if_with_blocks_pre ( Node *n );
  void  split_if_with_blocks_post( Node *n );
  Node *has_local_phi_input( Node *n );
  // Mark an IfNode as being dominated by a prior test,
  // without actually altering the CFG (and hence IDOM info).
  void dominated_by( Node *prevdom, Node *iff, bool flip = false, bool exclude_loop_predicate = false );

  // Split Node 'n' through merge point
  Node *split_thru_region( Node *n, Node *region );
  // Split Node 'n' through merge point if there is enough win.
  Node *split_thru_phi( Node *n, Node *region, int policy );
  // Found an If getting its condition-code input from a Phi in the
  // same block. Split thru the Region.
  void do_split_if( Node *iff );
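
  // For example (a sketch): splitting AddI(Phi(a, b), c) through the Phi's
  // Region yields Phi(AddI(a, c), AddI(b, c)), exposing per-path folding;
  // 'policy' limits how much duplication is considered a win.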

  // Conversion of fill/copy patterns into intrinsic versions
  bool do_intrinsify_fill();
  bool intrinsify_fill(IdealLoopTree* lpt);
  bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
                       Node*& shift, Node*& offset);

 private:
  // Return a type based on condition control flow
  const TypeInt* filtered_type( Node *n, Node* n_ctrl);
  const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
  // Helpers for filtered type
  const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);

  // Helper functions
  Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
  Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
  void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
  bool split_up( Node *n, Node *blk1, Node *blk2 );
  void sink_use( Node *use, Node *post_loop );
  Node* place_outside_loop(Node* useblock, IdealLoopTree* loop) const;
  Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
  void try_move_store_after_loop(Node* n);
  bool identical_backtoback_ifs(Node *n);
  bool can_split_if(Node *n_ctrl);

  // Determine if a method is too big for a/another round of split-if, based on
  // a magic (approximate) ratio derived from the equally magic constant 35000,
  // previously used for this purpose (but without relating to the node limit).
  bool must_throttle_split_if() {
    uint threshold = C->max_node_limit() * 2 / 5;
    return C->live_nodes() > threshold;
  }
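
  // For example (a sketch, assuming the default MaxNodeLimit of 80000):
  // threshold = 80000 * 2 / 5 = 32000, so further split-if rounds are
  // throttled once more than 32000 nodes are live.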

  // A simplistic node request tracking mechanism, where
  //   = UINT_MAX   Request not valid or made final.
  //   < UINT_MAX   Nodes currently requested (estimate).
  uint _nodes_required;

  enum { REQUIRE_MIN = 70 };

  uint nodes_required() const { return _nodes_required; }

  // Given the _currently_ available number of nodes, check whether there is
  // "room" for an additional request or not, considering the already required
  // number of nodes. Return TRUE if the new request is exceeding the node
  // budget limit, otherwise return FALSE. Note that this interpretation will
  // act pessimistically on additional requests when new nodes have already
  // been generated since the 'begin'. This behaviour fits with the intention
  // that node estimates/requests should be made upfront.
  bool exceeding_node_budget(uint required = 0) {
    assert(C->live_nodes() < C->max_node_limit(), "sanity");
    uint available = C->max_node_limit() - C->live_nodes();
    return available < required + _nodes_required + REQUIRE_MIN;
  }

  uint require_nodes(uint require, uint minreq = REQUIRE_MIN) {
    precond(require > 0);
    _nodes_required += MAX2(require, minreq);
    return _nodes_required;
  }

  bool may_require_nodes(uint require, uint minreq = REQUIRE_MIN) {
    return !exceeding_node_budget(require) && require_nodes(require, minreq) > 0;
  }
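
  // Typical usage sketch (est_loop_clone_sz() is the size estimator used by
  // the loop transforms): reserve budget before a transformation, and skip
  // the transformation when the budget would be exceeded.
  //
  //   if (may_require_nodes(loop->est_loop_clone_sz(2))) {
  //     do_unroll(loop, old_new, true);
  //   }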

  uint require_nodes_begin() {
    assert(_nodes_required == UINT_MAX, "Bad state (begin).");
    _nodes_required = 0;
    return C->live_nodes();
  }

  // When a node request is final, optionally check that the requested number
  // of nodes was reasonably correct with respect to the number of new nodes
  // introduced since the last 'begin'. Always check that we have not exceeded
  // the maximum node limit.
  void require_nodes_final(uint live_at_begin, bool check_estimate) {
    assert(_nodes_required < UINT_MAX, "Bad state (final).");

#ifdef ASSERT
    if (check_estimate) {
      // Check that the node budget request was not off by too much (x2).
      // Should this be the case we _surely_ need to improve the estimates
      // used in our budget calculations.
      if (C->live_nodes() - live_at_begin > 2 * _nodes_required) {
        log_info(compilation)("Bad node estimate: actual = %d >> request = %d",
                              C->live_nodes() - live_at_begin, _nodes_required);
      }
    }
#endif
    // Assert that we have stayed within the node budget limit.
    assert(C->live_nodes() < C->max_node_limit(),
           "Exceeding node budget limit: %d + %d > %d (request = %d)",
           C->live_nodes() - live_at_begin, live_at_begin,
           C->max_node_limit(), _nodes_required);

    _nodes_required = UINT_MAX;
  }

  // Clone loop predicates to slow and fast loop when unswitching a loop
  void clone_predicates_to_unswitched_loop(IdealLoopTree* loop, Node_List& old_new, ProjNode*& iffast_pred, ProjNode*& ifslow_pred);
  ProjNode* clone_predicate_to_unswitched_loop(ProjNode* predicate_proj, Node* new_entry, Deoptimization::DeoptReason reason,
                                               Node_List* old_new = NULL);
  void clone_skeleton_predicates_to_unswitched_loop(IdealLoopTree* loop, const Node_List& old_new, Deoptimization::DeoptReason reason,
                                                    ProjNode* old_predicate_proj, ProjNode* iffast_pred, ProjNode* ifslow_pred);
  ProjNode* clone_skeleton_predicate_for_unswitched_loops(Node* iff, ProjNode* predicate, Node* uncommon_proj, Deoptimization::DeoptReason reason,
                                                          ProjNode* output_proj, IdealLoopTree* loop);
  static void check_created_predicate_for_unswitching(const Node* new_entry) PRODUCT_RETURN;

  bool _created_loop_node;
#ifdef ASSERT
  void dump_real_LCA(Node* early, Node* wrong_lca);
  bool check_idom_chains_intersection(const Node* n, uint& idom_idx_new, uint& idom_idx_other, const Node_List* nodes_seen) const;
#endif

 public:
  void set_created_loop_node() { _created_loop_node = true; }
  bool created_loop_node()     { return _created_loop_node; }
  void register_new_node(Node* n, Node* blk);

#ifdef ASSERT
  void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
#endif

#ifndef PRODUCT
  void dump() const;
  void dump_idom(Node* n) const;
  void dump(IdealLoopTree* loop, uint rpo_idx, Node_List &rpo_list) const;
  void verify() const;          // Major slow  :-)
  void verify_compare(Node* n, const PhaseIdealLoop* loop_verify, VectorSet &visited) const;
  IdealLoopTree* get_loop_idx(Node* n) const {
    // Dead nodes have no loop, so return the top level loop instead
    return _nodes[n->_idx] ? (IdealLoopTree*)_nodes[n->_idx] : _ltree_root;
  }
  // Print some stats
  static void print_statistics();
  static int _loop_invokes;     // Count of PhaseIdealLoop invokes
  static int _loop_work;        // Sum of PhaseIdealLoop x _unique
  static volatile int _long_loop_candidates;
  static volatile int _long_loop_nests;
  static volatile int _long_loop_counted_loops;
#endif

  void rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const;

  void check_long_counted_loop(IdealLoopTree* loop, Node* x) NOT_DEBUG_RETURN;

  LoopNode* create_inner_head(IdealLoopTree* loop, LongCountedLoopNode* head, LongCountedLoopEndNode* exit_test);

  Node* get_late_ctrl_with_anti_dep(LoadNode* n, Node* early, Node* LCA);

  bool ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl);

  bool ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop);

  Node* compute_early_ctrl(Node* n, Node* n_ctrl);

  void try_sink_out_of_loop(Node* n);

  bool safe_for_if_replacement(const Node* dom) const;
};


class AutoNodeBudget : public StackObj
{
 public:
  enum budget_check_t { BUDGET_CHECK, NO_BUDGET_CHECK };

  AutoNodeBudget(PhaseIdealLoop* phase, budget_check_t chk = BUDGET_CHECK)
    : _phase(phase),
      _check_at_final(chk == BUDGET_CHECK),
      _nodes_at_begin(0)
  {
    precond(_phase != NULL);

    _nodes_at_begin = _phase->require_nodes_begin();
  }

  ~AutoNodeBudget() {
#ifndef PRODUCT
    if (TraceLoopOpts) {
      uint request = _phase->nodes_required();
      uint delta   = _phase->C->live_nodes() - _nodes_at_begin;

      if (request < delta) {
        tty->print_cr("Exceeding node budget: %d < %d", request, delta);
      } else {
        uint const REQUIRE_MIN = PhaseIdealLoop::REQUIRE_MIN;
        // Identify the worst estimates as "poor" ones.
        if (request > REQUIRE_MIN && delta > 0) {
          if ((delta >  REQUIRE_MIN && request >  3 * delta) ||
              (delta <= REQUIRE_MIN && request > 10 * delta)) {
            tty->print_cr("Poor node estimate: %d >> %d", request, delta);
          }
        }
      }
    }
#endif // PRODUCT
    _phase->require_nodes_final(_nodes_at_begin, _check_at_final);
  }

 private:
  PhaseIdealLoop* _phase;
  bool            _check_at_final;
  uint            _nodes_at_begin;
};
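
// Typical usage sketch: scope one optimization round so that node requests
// are opened with require_nodes_begin() and closed/verified by the dtor.
//
//   {
//     AutoNodeBudget node_budget(this); // 'this' is a PhaseIdealLoop
//     if (may_require_nodes(loop->est_loop_clone_sz(2))) {
//       // ... perform the budgeted transformation ...
//     }
//   } // ~AutoNodeBudget checks the estimate and resets the request state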


// This kit may be used for making a reserved copy of a loop before the loop
// undergoes irreversible changes.
//
// Function create_reserve() creates a reserved copy (clone) of the loop.
// The reserved copy is created by calling
// PhaseIdealLoop::create_reserve_version_of_loop - see there how
// the original and reserved loops are connected in the outer graph.
// If create_reserve() succeeded, it returns 'true' and _has_reserved is set to 'true'.
//
// By default the reserved copy (clone) of the loop is created as dead code - it is
// dominated in the outer loop by this node chain:
//   intcon(1)->If->IfFalse->reserved_copy.
// The original loop is dominated by the same node chain, but via the IfTrue projection:
//   intcon(1)->If->IfTrue->original_loop.
//
// In this implementation of CountedLoopReserveKit the ctor calls create_reserve()
// and the dtor checks the _use_new value.
// If _use_new == false, it "switches" control to the reserved copy of the loop
// by simply replacing node intcon(1) with node intcon(0).
//
// Here is a proposed example of usage (see also SuperWord::output in superword.cpp).
//
// void CountedLoopReserveKit_example()
// {
//    CountedLoopReserveKit lrk(phase, lpt, DoReserveCopy = true); // create local object
//    if (DoReserveCopy && !lrk.has_reserved()) {
//      return; // failed to create reserved loop copy
//    }
//    ...
//    // something is wrong, switch to original loop
//    if (something_is_wrong) return; // ~CountedLoopReserveKit makes the switch
//    ...
//    // everything worked ok, return with the newly modified loop
//    lrk.use_new();
//    return; // ~CountedLoopReserveKit does nothing once use_new() was called
// }
//
// Keep in mind that, by default, if create_reserve() is not followed by use_new()
// the dtor will "switch to the original" loop.
// NOTE: If you modify outside of the original loop, this class is no help.
//
class CountedLoopReserveKit {
 private:
  PhaseIdealLoop* _phase;
  IdealLoopTree*  _lpt;
  LoopNode*       _lp;
  IfNode*         _iff;
  LoopNode*       _lp_reserved;
  bool            _has_reserved;
  bool            _use_new;
  const bool      _active; // may be set to false in the ctor, then the object is a dummy

 public:
  CountedLoopReserveKit(PhaseIdealLoop* phase, IdealLoopTree *loop, bool active);
  ~CountedLoopReserveKit();
  void use_new()            { _use_new = true; }
  void set_iff(IfNode* x)   { _iff = x; }
  bool has_reserved() const { return _active && _has_reserved; }
 private:
  bool create_reserve();
};// class CountedLoopReserveKit

inline Node* IdealLoopTree::tail() {
  // Handle lazy update of _tail field.
  if (_tail->in(0) == NULL) {
    _tail = _phase->get_ctrl(_tail);
  }
  return _tail;
}

inline Node* IdealLoopTree::head() {
  // Handle lazy update of _head field.
  if (_head->in(0) == NULL) {
    _head = _phase->get_ctrl(_head);
  }
  return _head;
}

// Iterate over the loop tree using a preorder, left-to-right traversal.
//
// Example that visits all counted loops from within PhaseIdealLoop
//
//  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
//    IdealLoopTree* lpt = iter.current();
//    if (!lpt->is_counted()) continue;
//    ...
//  }
class LoopTreeIterator : public StackObj {
 private:
  IdealLoopTree* _root;
  IdealLoopTree* _curnt;

 public:
  LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}

  bool done() { return _curnt == NULL; }       // Finished iterating?

  void next();                                 // Advance to next loop tree

  IdealLoopTree* current() { return _curnt; }  // Return current value of iterator.
};

#endif // SHARE_OPTO_LOOPNODE_HPP