/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_LOOPNODE_HPP
#define SHARE_OPTO_LOOPNODE_HPP

#include "opto/cfgnode.hpp"
#include "opto/multnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"

class CmpNode;
class BaseCountedLoopEndNode;
class CountedLoopNode;
class IdealLoopTree;
class LoopNode;
class Node;
class OuterStripMinedLoopEndNode;
class PathFrequency;
class PhaseIdealLoop;
class CountedLoopReserveKit;
class VectorSet;
class Invariance;
struct small_cache;

//
//                  I D E A L I Z E D   L O O P S
//
// Idealized loops are the set of loops I perform more interesting
// transformations on, beyond simple hoisting.

//------------------------------LoopNode---------------------------------------
// Simple loop header.  Fall in path on left, loop-back path on right.
class LoopNode : public RegionNode {
  // Size is bigger to hold the flags.  However, the flags do not change
  // the semantics so they do not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  uint _loop_flags;
  // Names for flag bitfields
  enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
         MainHasNoPreLoop=4,
         HasExactTripCount=8,
         InnerLoop=16,
         PartialPeelLoop=32,
         PartialPeelFailed=64,
         HasReductions=128,
         WasSlpAnalyzed=256,
         PassedSlpAnalysis=512,
         DoUnrollOnly=1024,
         VectorizedLoop=2048,
         HasAtomicPostLoop=4096,
         HasRangeChecks=8192,
         IsMultiversioned=16384,
         StripMined=32768,
         SubwordLoop=65536,
         ProfileTripFailed=131072,
         TransformedLongLoop=262144};
  char _unswitch_count;
  enum { _unswitch_max=3 };
  char _postloop_flags;
  enum { LoopNotRCEChecked = 0, LoopRCEChecked = 1, RCEPostLoop = 2 };

  // Expected trip count from profile data
  float _profile_trip_cnt;

public:
  // Names for edge indices
  enum { Self=0, EntryControl, LoopBackControl };

  bool is_inner_loop() const { return _loop_flags & InnerLoop; }
  void set_inner_loop() { _loop_flags |= InnerLoop; }

  bool range_checks_present() const { return _loop_flags & HasRangeChecks; }
  bool is_multiversioned() const { return _loop_flags & IsMultiversioned; }
  bool is_vectorized_loop() const { return _loop_flags & VectorizedLoop; }
  bool is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
  void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
  bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
  bool is_strip_mined() const { return _loop_flags & StripMined; }
  bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
  bool is_subword_loop() const { return _loop_flags & SubwordLoop; }
  bool is_transformed_long_loop() const { return _loop_flags & TransformedLongLoop; }

  void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
  void mark_has_reductions() { _loop_flags |= HasReductions; }
  void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
  void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
  void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
  void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
  void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
  void mark_has_range_checks() { _loop_flags |= HasRangeChecks; }
  void mark_is_multiversioned() { _loop_flags |= IsMultiversioned; }
  void mark_strip_mined() { _loop_flags |= StripMined; }
  void clear_strip_mined() { _loop_flags &= ~StripMined; }
  void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
  void mark_subword_loop() { _loop_flags |= SubwordLoop; }
  void mark_transformed_long_loop() { _loop_flags |= TransformedLongLoop; }

  int unswitch_max() { return _unswitch_max; }
  int unswitch_count() { return _unswitch_count; }

  int has_been_range_checked() const { return _postloop_flags & LoopRCEChecked; }
  void set_has_been_range_checked() { _postloop_flags |= LoopRCEChecked; }
  int is_rce_post_loop() const { return _postloop_flags & RCEPostLoop; }
  void set_is_rce_post_loop() { _postloop_flags |= RCEPostLoop; }

  void set_unswitch_count(int val) {
    assert (val <= unswitch_max(), "too many unswitches");
    _unswitch_count = val;
  }

  void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
  float profile_trip_cnt() { return _profile_trip_cnt; }

  LoopNode(Node *entry, Node *backedge)
    : RegionNode(3), _loop_flags(0), _unswitch_count(0),
      _postloop_flags(0), _profile_trip_cnt(COUNT_UNKNOWN) {
    init_class_id(Class_Loop);
    init_req(EntryControl, entry);
    init_req(LoopBackControl, backedge);
  }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  bool can_be_counted_loop(PhaseTransform* phase) const {
    return req() == 3 && in(0) != NULL &&
      in(1) != NULL && phase->type(in(1)) != Type::TOP &&
      in(2) != NULL && phase->type(in(2)) != Type::TOP;
  }
  bool is_valid_counted_loop(BasicType bt) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

  void verify_strip_mined(int expect_skeleton) const NOT_DEBUG_RETURN;
  virtual LoopNode* skip_strip_mined(int expect_skeleton = 1) { return this; }
  virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return NULL; }
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return NULL; }
  virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return NULL; }
  virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return NULL; }
};

//------------------------------Counted Loops----------------------------------
// Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
// path (and maybe some other exit paths).  The trip-counter exit is always
// last in the loop.  The trip-counter has to stride by a constant;
// the exit value is also loop invariant.

// CountedLoopNodes and CountedLoopEndNodes come in matched pairs.  The
// CountedLoopNode has the incoming loop control and the loop-back-control
// which is always the IfTrue before the matching CountedLoopEndNode.  The
// CountedLoopEndNode has an incoming control (possibly not the
// CountedLoopNode if there is control flow in the loop), the post-increment
// trip-counter value, and the limit.  The trip-counter value is always of
// the form (Op old-trip-counter stride).  The old-trip-counter is produced
// by a Phi connected to the CountedLoopNode.  The stride is constant.
// The Op is any commutable opcode, including Add, Mul, Xor.  The
// CountedLoopEndNode also takes in the loop-invariant limit value.

// From a CountedLoopNode I can reach the matching CountedLoopEndNode via the
// loop-back control.  From CountedLoopEndNodes I can reach CountedLoopNodes
// via the old-trip-counter from the Op node.
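
// Illustrative sketch (not part of the original comment): for a simple Java
// source loop such as
//
//   for (int i = init; i < limit; i += stride) { ... }
//
// the idealized shape is roughly
//
//   CountedLoop --> Phi(i) --> AddI(Phi, stride) --> CmpI(AddI, limit)
//     --> Bool(lt) --> CountedLoopEnd --> IfTrue  (back to CountedLoop)
//                                     --> IfFalse (loop exit)
//
// Exact node shapes and projections vary with the loop; this only makes the
// pairing between CountedLoopNode and CountedLoopEndNode concrete.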

//------------------------------CountedLoopNode--------------------------------
// CountedLoopNodes head simple counted loops.  CountedLoopNodes have as
// inputs the incoming loop-start control and the loop-back control, so they
// act like RegionNodes.  They also take in the initial trip counter, the
// loop-invariant stride and the loop-invariant limit value.  CountedLoopNodes
// produce a loop-body control and the trip counter value.  Since
// CountedLoopNodes behave like RegionNodes I still have a standard CFG model.

class BaseCountedLoopNode : public LoopNode {
public:
  BaseCountedLoopNode(Node *entry, Node *backedge)
    : LoopNode(entry, backedge) {
  }

  Node *init_control() const { return in(EntryControl); }
  Node *back_control() const { return in(LoopBackControl); }

  Node* init_trip() const;
  Node* stride() const;
  bool stride_is_con() const;
  Node* limit() const;
  Node* incr() const;
  Node* phi() const;

  BaseCountedLoopEndNode* loopexit_or_null() const;
  BaseCountedLoopEndNode* loopexit() const;

  virtual BasicType bt() const = 0;
  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return false;
  }

  static BaseCountedLoopNode* make(Node* entry, Node* backedge, BasicType bt);
};


class CountedLoopNode : public BaseCountedLoopNode {
  // Size is bigger to hold _main_idx.  However, _main_idx does not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }

  // For Pre- and Post-loops during debugging ONLY, this holds the index of
  // the Main CountedLoop.  Used to assert that we understand the graph shape.
  node_idx_t _main_idx;

  // Known trip count calculated by compute_exact_trip_count()
  uint  _trip_count;

  // Log2 of original loop bodies in unrolled loop
  int _unrolled_count_log2;

  // Node count prior to last unrolling - used to decide if
  // unroll,optimize,unroll,optimize,... is making progress
  int _node_count_before_unroll;

  // If slp analysis is performed we record the maximum
  // vector mapped unroll factor here
  int _slp_maximum_unroll_factor;

public:
  CountedLoopNode(Node *entry, Node *backedge)
    : BaseCountedLoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint),
      _unrolled_count_log2(0), _node_count_before_unroll(0),
      _slp_maximum_unroll_factor(0) {
    init_class_id(Class_CountedLoop);
    // Initialize _trip_count to the largest possible value.
    // Will be reset (lower) if the loop's trip count is known.
  }

  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  CountedLoopEndNode* loopexit_or_null() const { return (CountedLoopEndNode*) BaseCountedLoopNode::loopexit_or_null(); }
  CountedLoopEndNode* loopexit() const { return (CountedLoopEndNode*) BaseCountedLoopNode::loopexit(); }
  int stride_con() const;

  // Match increment with optional truncation
  static Node*
  match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInteger** trunc_type,
                                      BasicType bt);

  // A 'main' loop has a pre-loop and a post-loop.  The 'main' loop
  // can run short a few iterations and may start a few iterations in.
  // It will be RCE'd and unrolled and aligned.

  // A following 'post' loop will run any remaining iterations.  Used
  // during Range Check Elimination, the 'post' loop will do any final
  // iterations with full checks.  Also used by Loop Unrolling, where
  // the 'post' loop will do any epilog iterations needed.  Basically,
  // a 'post' loop cannot profitably be further unrolled or RCE'd.

  // A preceding 'pre' loop will run at least 1 iteration (to do peeling),
  // it may do under-flow checks for RCE and may do alignment iterations
  // so the following main loop 'knows' that it is striding down cache
  // lines.

  // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  // Aligned, may be missing its pre-loop.
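
  // Illustrative sketch (not from the original source): after iteration
  // splitting, a source loop like
  //
  //   for (int i = 0; i < n; i++) { a[i] = 0; }
  //
  // conceptually becomes
  //
  //   for (i = 0; i < pre_limit;  i++)      { a[i] = 0; }  // pre  (peeling, alignment)
  //   for (     ; i < main_limit; i += U)   { ... }        // main (unrolled by U, RCE'd)
  //   for (     ; i < n;          i++)      { a[i] = 0; }  // post (epilog, full checks)
  //
  // The exact limits are computed elsewhere; this only shows the intent of
  // the pre/main/post structure described above.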
  bool is_normal_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
  bool is_pre_loop      () const { return (_loop_flags&PreMainPostFlagsMask) == Pre;    }
  bool is_main_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Main;   }
  bool is_post_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Post;   }
  bool is_reduction_loop() const { return (_loop_flags&HasReductions) == HasReductions; }
  bool was_slp_analyzed () const { return (_loop_flags&WasSlpAnalyzed) == WasSlpAnalyzed; }
  bool has_passed_slp   () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
  bool is_unroll_only   () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; }
  bool is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
  bool has_atomic_post_loop () const { return (_loop_flags & HasAtomicPostLoop) == HasAtomicPostLoop; }
  void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }

  int main_idx() const { return _main_idx; }


  void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
  void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main; }
  void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
  void set_normal_loop(                    ) { _loop_flags &= ~PreMainPostFlagsMask; }

  void set_trip_count(uint tc) { _trip_count = tc; }
  uint trip_count()            { return _trip_count; }

  bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; }
  void set_exact_trip_count(uint tc) {
    _trip_count = tc;
    _loop_flags |= HasExactTripCount;
  }
  void set_nonexact_trip_count() {
    _loop_flags &= ~HasExactTripCount;
  }
  void set_notpassed_slp() {
    _loop_flags &= ~PassedSlpAnalysis;
  }

  void double_unrolled_count() { _unrolled_count_log2++; }
  int  unrolled_count()        { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }

  void set_node_count_before_unroll(int ct)  { _node_count_before_unroll = ct; }
  int  node_count_before_unroll()            { return _node_count_before_unroll; }
  void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
  int  slp_max_unroll() const                { return _slp_maximum_unroll_factor; }

  virtual LoopNode* skip_strip_mined(int expect_skeleton = 1);
  OuterStripMinedLoopNode* outer_loop() const;
  virtual IfTrueNode* outer_loop_tail() const;
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
  virtual IfFalseNode* outer_loop_exit() const;
  virtual SafePointNode* outer_safepoint() const;

  // If this is a main loop in a pre/main/post loop nest, walk over
  // the predicates that were inserted by
  // duplicate_predicates()/add_range_check_predicate()
  static Node* skip_predicates_from_entry(Node* ctrl);
  Node* skip_predicates();

  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return bt == T_INT;
  }
  virtual BasicType bt() const {
    return T_INT;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class LongCountedLoopNode : public BaseCountedLoopNode {
public:
  LongCountedLoopNode(Node *entry, Node *backedge)
    : BaseCountedLoopNode(entry, backedge) {
    init_class_id(Class_LongCountedLoop);
  }

  virtual int Opcode() const;

  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return bt == T_LONG;
  }

  virtual BasicType bt() const {
    return T_LONG;
  }

  LongCountedLoopEndNode* loopexit_or_null() const { return (LongCountedLoopEndNode*) BaseCountedLoopNode::loopexit_or_null(); }
  LongCountedLoopEndNode* loopexit() const { return (LongCountedLoopEndNode*) BaseCountedLoopNode::loopexit(); }
  jlong stride_con() const;
};


//------------------------------CountedLoopEndNode-----------------------------
// CountedLoopEndNodes end simple trip counted loops.  They act much like
// IfNodes.

class BaseCountedLoopEndNode : public IfNode {
public:
  enum { TestControl, TestValue };
  BaseCountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : IfNode(control, test, prob, cnt) {
    init_class_id(Class_BaseCountedLoopEnd);
  }

  Node *cmp_node() const { return (in(TestValue)->req() >= 2) ? in(TestValue)->in(1) : NULL; }
  Node* incr() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; }
  Node* limit() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(2) : NULL; }
  Node* stride() const { Node* tmp = incr(); return (tmp && tmp->req() == 3) ? tmp->in(2) : NULL; }
  Node* init_trip() const { Node* tmp = phi(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; }
  bool stride_is_con() const { Node *tmp = stride(); return (tmp != NULL && tmp->is_Con()); }

  PhiNode* phi() const {
    Node* tmp = incr();
    if (tmp && tmp->req() == 3) {
      Node* phi = tmp->in(1);
      if (phi->is_Phi()) {
        return phi->as_Phi();
      }
    }
    return NULL;
  }

  BaseCountedLoopNode* loopnode() const {
    // The CountedLoopNode that goes with this CountedLoopEndNode may
    // have been optimized out by the IGVN so be cautious with the
    // pattern matching on the graph
    PhiNode* iv_phi = phi();
    if (iv_phi == NULL) {
      return NULL;
    }
    Node* ln = iv_phi->in(0);
    if (!ln->is_BaseCountedLoop() || ln->as_BaseCountedLoop()->loopexit_or_null() != this) {
      return NULL;
    }
    if (!ln->operates_on(bt(), true)) {
      return NULL;
    }
    return ln->as_BaseCountedLoop();
  }

  BoolTest::mask test_trip() const { return in(TestValue)->as_Bool()->_test._test; }
  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return false;
  }

  jlong stride_con() const;
  virtual BasicType bt() const = 0;

  static BaseCountedLoopEndNode* make(Node* control, Node* test, float prob, float cnt, BasicType bt);
};

class CountedLoopEndNode : public BaseCountedLoopEndNode {
public:

  CountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : BaseCountedLoopEndNode(control, test, prob, cnt) {
    init_class_id(Class_CountedLoopEnd);
  }
  virtual int Opcode() const;

  CountedLoopNode* loopnode() const {
    return (CountedLoopNode*) BaseCountedLoopEndNode::loopnode();
  }
  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return bt == T_INT;
  }

  virtual BasicType bt() const {
    return T_INT;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class LongCountedLoopEndNode : public BaseCountedLoopEndNode {
public:
  LongCountedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : BaseCountedLoopEndNode(control, test, prob, cnt) {
    init_class_id(Class_LongCountedLoopEnd);
  }

  LongCountedLoopNode* loopnode() const {
    return (LongCountedLoopNode*) BaseCountedLoopEndNode::loopnode();
  }
  virtual bool operates_on(BasicType bt, bool signed_int) const {
    assert(bt == T_INT || bt == T_LONG, "unsupported");
    return bt == T_LONG;
  }
  virtual int Opcode() const;

  virtual BasicType bt() const {
    return T_LONG;
  }
};


inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit_or_null() const {
  Node* bctrl = back_control();
  if (bctrl == NULL) return NULL;

  Node* lexit = bctrl->in(0);
  if (!lexit->is_BaseCountedLoopEnd()) {
    return NULL;
  }
  BaseCountedLoopEndNode* result = lexit->as_BaseCountedLoopEnd();
  if (!result->operates_on(bt(), true)) {
    return NULL;
  }
  return result;
}

inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  assert(cle != NULL, "loopexit is NULL");
  return cle;
}

inline Node* BaseCountedLoopNode::init_trip() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->init_trip() : NULL;
}
inline Node* BaseCountedLoopNode::stride() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->stride() : NULL;
}

inline bool BaseCountedLoopNode::stride_is_con() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL && cle->stride_is_con();
}
inline Node* BaseCountedLoopNode::limit() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->limit() : NULL;
}
inline Node* BaseCountedLoopNode::incr() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->incr() : NULL;
}
inline Node* BaseCountedLoopNode::phi() const {
  BaseCountedLoopEndNode* cle = loopexit_or_null();
  return cle != NULL ? cle->phi() : NULL;
}

//------------------------------LoopLimitNode-----------------------------
// Counted Loop limit node which represents exact final iterator value:
// trip_count = (limit - init_trip + stride - 1)/stride
// final_value = trip_count * stride + init_trip
// Use HW instructions to calculate it when it can overflow in integer.
// Note, final_value should fit into integer since counted loop has
// limit check: limit <= max_int-stride.
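//
// A worked example (illustrative only, not from the original comment):
// with init_trip = 0, limit = 10 and stride = 3,
//   trip_count  = (10 - 0 + 3 - 1)/3 = 4
//   final_value = 4 * 3 + 0         = 12
// i.e. the counted loop runs for i = 0, 3, 6, 9 and the iterator's exact
// value past the loop is 12.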
class LoopLimitNode : public Node {
  enum { Init=1, Limit=2, Stride=3 };
public:
  LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) {
    // Put it on the Macro nodes list to optimize during macro nodes expansion.
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase);
};

// Support for strip mining
class OuterStripMinedLoopNode : public LoopNode {
private:
  CountedLoopNode* inner_loop() const;
public:
  OuterStripMinedLoopNode(Compile* C, Node *entry, Node *backedge)
    : LoopNode(entry, backedge) {
    init_class_id(Class_OuterStripMinedLoop);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }

  virtual int Opcode() const;

  virtual IfTrueNode* outer_loop_tail() const;
  virtual OuterStripMinedLoopEndNode* outer_loop_end() const;
  virtual IfFalseNode* outer_loop_exit() const;
  virtual SafePointNode* outer_safepoint() const;
  void adjust_strip_mined_loop(PhaseIterGVN* igvn);
};

class OuterStripMinedLoopEndNode : public IfNode {
public:
  OuterStripMinedLoopEndNode(Node *control, Node *test, float prob, float cnt)
    : IfNode(control, test, prob, cnt) {
    init_class_id(Class_OuterStripMinedLoopEnd);
  }

  virtual int Opcode() const;

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  bool is_expanded(PhaseGVN *phase) const;
};

// -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj {
public:
  IdealLoopTree *_parent;  // Parent in loop tree
  IdealLoopTree *_next;    // Next sibling in loop tree
  IdealLoopTree *_child;   // First child in loop tree

  // The head-tail backedge defines the loop.
  // If a loop has multiple backedges, this is addressed during cleanup where
  // we peel off the multiple backedges, merging all edges at the bottom and
  // ensuring that one proper backedge flows into the loop.
  Node *_head;                  // Head of loop
  Node *_tail;                  // Tail of loop
  inline Node *tail();          // Handle lazy update of _tail field
  inline Node *head();          // Handle lazy update of _head field
  PhaseIdealLoop* _phase;
  int _local_loop_unroll_limit;
  int _local_loop_unroll_factor;

  Node_List _body;              // Loop body for inner loops

  uint16_t _nest;               // Nesting depth
  uint8_t _irreducible:1,       // True if irreducible
          _has_call:1,          // True if has call safepoint
          _has_sfpt:1,          // True if has non-call safepoint
          _rce_candidate:1;     // True if candidate for range check elimination

  Node_List* _safepts;          // List of safepoints in this loop
  Node_List* _required_safept;  // An inner loop cannot delete these safepts;
  bool  _allow_optimizations;   // Allow loop optimizations

  IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
    : _parent(0), _next(0), _child(0),
      _head(head), _tail(tail),
      _phase(phase),
      _local_loop_unroll_limit(0), _local_loop_unroll_factor(0),
      _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0),
      _safepts(NULL),
      _required_safept(NULL),
      _allow_optimizations(true)
  {
    precond(_head != NULL);
    precond(_tail != NULL);
  }

  // Is 'l' a member of 'this'?
  bool is_member(const IdealLoopTree *l) const; // Test for nested membership

  // Set loop nesting depth.  Accumulate has_call bits.
  int set_nest( uint depth );

  // Split out multiple fall-in edges from the loop header.  Move them to a
  // private RegionNode before the loop.  This becomes the loop landing pad.
  void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );

  // Split out the outermost loop from this shared header.
  void split_outer_loop( PhaseIdealLoop *phase );

  // Merge all the backedges from the shared header into a private Region.
  // Feed that region as the one backedge to this loop.
  void merge_many_backedges( PhaseIdealLoop *phase );

  // Split shared headers and insert loop landing pads.
  // Insert a LoopNode to replace the RegionNode.
  // Returns TRUE if loop tree is structurally changed.
  bool beautify_loops( PhaseIdealLoop *phase );

  // Perform optimization to use the loop predicates for null checks and range checks.
  // Applies to any loop level (not just the innermost one)
  bool loop_predication( PhaseIdealLoop *phase);

  // Perform iteration-splitting on inner loops.  Split iterations to
  // avoid range checks or one-shot null checks.  Returns false if the
  // current round of loop opts should stop.
  bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new );

  // Driver for various flavors of iteration splitting.  Returns false
  // if the current round of loop opts should stop.
  bool iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );

  // Given dominators, try to find loops with calls that must always be
  // executed (call dominates loop tail).  These loops do not need non-call
  // safepoints (ncsfpt).
  void check_safepts(VectorSet &visited, Node_List &stack);

  // Allpaths backwards scan from loop tail, terminating each path at first safepoint
  // encountered.
  void allpaths_check_safepts(VectorSet &visited, Node_List &stack);

  // Remove safepoints from loop. Optionally keeping one.
  void remove_safepoints(PhaseIdealLoop* phase, bool keep_one);

  // Convert to counted loops where possible
  void counted_loop( PhaseIdealLoop *phase );

  // Check for Node being a loop-breaking test
  Node *is_loop_exit(Node *iff) const;

  // Remove simplistic dead code from loop body
  void DCE_loop_body();

  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
  // Replace with a 1-in-10 exit guess.
  void adjust_loop_exit_prob( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
  // Useful for unrolling loops with NO array accesses.
  bool policy_peel_only( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unswitched -- clone
  // loop with an invariant test
  bool policy_unswitching( PhaseIdealLoop *phase ) const;

  // Micro-benchmark spamming.  Remove empty loops.
  bool do_remove_empty_loop( PhaseIdealLoop *phase );

  // Convert one iteration loop into normal code.
  bool do_one_iteration_loop( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
  // move some loop-invariant test (usually a null-check) before the loop.
  bool policy_peeling(PhaseIdealLoop *phase);

  uint estimate_peeling(PhaseIdealLoop *phase);

  // Return TRUE or FALSE if the loop should be maximally unrolled.  Stash any
  // known trip count in the counted loop node.
  bool policy_maximally_unroll(PhaseIdealLoop *phase) const;

  // Return TRUE or FALSE if the loop should be unrolled or not.  Apply unroll
  // if the loop is a counted loop and the loop body is small enough.
  bool policy_unroll(PhaseIdealLoop *phase);

  // Loop analyses to map to a maximal superword unrolling for vectorization.
  void policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct);

  // Return TRUE or FALSE if the loop should be range-check-eliminated.
  // Gather a list of IF tests that are dominated by iteration splitting;
  // also gather the end of the first split and the start of the 2nd split.
  bool policy_range_check( PhaseIdealLoop *phase ) const;

  // Return TRUE if "iff" is a range check.
  bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const;

  // Estimate the number of nodes required when cloning a loop (body).
  uint est_loop_clone_sz(uint factor) const;
  // Estimate the number of nodes required when unrolling a loop (body).
  uint est_loop_unroll_sz(uint factor) const;

  // Compute loop trip count if possible
  void compute_trip_count(PhaseIdealLoop* phase);

  // Compute loop trip count from profile data
  float compute_profile_trip_cnt_helper(Node* n);
  void compute_profile_trip_cnt( PhaseIdealLoop *phase );

  // Reassociate invariant expressions.
  void reassociate_invariants(PhaseIdealLoop *phase);
  // Reassociate invariant binary expressions.
  Node* reassociate(Node* n1, PhaseIdealLoop *phase);
  // Reassociate invariant add and subtract expressions.
  Node* reassociate_add_sub(Node* n1, int inv1_idx, int inv2_idx, PhaseIdealLoop *phase);
  // Return nonzero index of invariant operand if invariant and variant
  // are combined with an associative binary. Helper for reassociate_invariants.
  int find_invariant(Node* n, PhaseIdealLoop *phase);
  // Return TRUE if "n" is associative.
  bool is_associative(Node* n, Node* base=NULL);

  // Return true if n is invariant
  bool is_invariant(Node* n) const;

  // Put loop body on igvn work list
  void record_for_igvn();

  bool is_root() { return _parent == NULL; }
  // A proper/reducible loop w/o any (occasional) dead back-edge.
  bool is_loop() { return !_irreducible && !tail()->is_top(); }
  bool is_counted()   { return is_loop() && _head->is_CountedLoop(); }
  bool is_innermost() { return is_loop() && _child == NULL; }

  void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);

#ifndef PRODUCT
  void dump_head() const;       // Dump loop head only
  void dump() const;            // Dump this loop recursively
  void verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const;
#endif

private:
  enum { EMPTY_LOOP_SIZE = 7 }; // Number of nodes in an empty loop.

  // Estimate the number of nodes resulting from control and data flow merge.
  uint est_loop_flow_merge_sz() const;
};

// -----------------------------PhaseIdealLoop---------------------------------
// Computes the mapping from Nodes to IdealLoopTrees. Organizes IdealLoopTrees
// into a loop tree. Drives the loop-based transformations on the ideal graph.
class PhaseIdealLoop : public PhaseTransform {
  friend class IdealLoopTree;
  friend class SuperWord;
  friend class CountedLoopReserveKit;
  friend class ShenandoahBarrierC2Support;
  friend class AutoNodeBudget;

  // Pre-computed def-use info
  PhaseIterGVN &_igvn;

  // Head of loop tree
  IdealLoopTree* _ltree_root;

  // Array of pre-order numbers, plus post-visited bit.
  // ZERO for not pre-visited.  EVEN for pre-visited but not post-visited.
  // ODD for post-visited.  Other bits are the pre-order number.
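  // For example (illustrative, not from the original comment): a node with
  // pre-order number 7 is stored as (7 << 1) == 14 while only pre-visited,
  // and becomes 14 | 1 == 15 once it has been post-visited.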
  uint *_preorders;
  uint _max_preorder;

  const PhaseIdealLoop* _verify_me;
  bool _verify_only;

  // Allocate _preorders[] array
  void allocate_preorders() {
    _max_preorder = C->unique()+8;
    _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder);
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Allocate _preorders[] array
  void reallocate_preorders() {
    if ( _max_preorder < C->unique() ) {
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, C->unique());
      _max_preorder = C->unique();
    }
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Check to grow _preorders[] array for the case when build_loop_tree_impl()
  // adds new nodes.
  void check_grow_preorders( ) {
    if ( _max_preorder < C->unique() ) {
      uint newsize = _max_preorder<<1;  // double size of array
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, newsize);
      memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder));
      _max_preorder = newsize;
    }
  }
  // Check for pre-visited.  Zero for NOT visited; non-zero for visited.
  int is_visited( Node *n ) const { return _preorders[n->_idx]; }
  // Pre-order numbers are written to the Nodes array as low-bit-set values.
  void set_preorder_visited( Node *n, int pre_order ) {
    assert( !is_visited( n ), "already set" );
    _preorders[n->_idx] = (pre_order<<1);
  };
  // Return pre-order number.
  int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }

  // Check for being post-visited.
  // Should be previsited already (checked with assert(is_visited(n))).
  int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }

  // Mark as post visited
  void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; }

public:
  // Set/get control node out.  Set lower bit to distinguish from IdealLoopTree
  // Returns true if "n" is a data node, false if it's a control node.
  bool has_ctrl( Node *n ) const { return ((intptr_t)_nodes[n->_idx]) & 1; }

private:
  // clear out dead code after build_loop_late
  Node_List _deadlist;

  // Support for faster execution of get_late_ctrl()/dom_lca()
  // when a node has many uses and dominator depth is deep.
  Node_Array _dom_lca_tags;
  void init_dom_lca_tags();
  void clear_dom_lca_tags();

  // Helper for debugging bad dominance relationships
  bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early);

  Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false);

  // Inline wrapper for frequent cases:
  // 1) only one use
  // 2) a use is the same as the current LCA passed as 'n1'
  Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
    assert( n->is_CFG(), "" );
    // Fast-path NULL lca
    if( lca != NULL && lca != n ) {
      assert( lca->is_CFG(), "" );
      // find LCA of all uses
      n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
    }
    return find_non_split_ctrl(n);
  }
  Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );

  // Helper function for directing control inputs away from CFG split points.
  Node *find_non_split_ctrl( Node *ctrl ) const {
    if (ctrl != NULL) {
      if (ctrl->is_MultiBranch()) {
        ctrl = ctrl->in(0);
      }
      assert(ctrl->is_CFG(), "CFG");
    }
    return ctrl;
  }

  Node* cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);

#ifdef ASSERT
  void ensure_zero_trip_guard_proj(Node* node, bool is_main_loop);
#endif
  void copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* init, Node* stride, IdealLoopTree* outer_loop, LoopNode* outer_main_head,
                                                    uint dd_main_head, const uint idx_before_pre_post, const uint idx_after_post_before_pre,
                                                    Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post, const Node_List &old_new);
  void copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* init, Node* stride, IdealLoopTree* outer_loop, LoopNode* outer_main_head,
                                             uint dd_main_head, const uint idx_before_pre_post, const uint idx_after_post_before_pre,
                                             Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post, const Node_List &old_new);
  Node* clone_skeleton_predicate_for_main_loop(Node* iff, Node* new_init, Node* new_stride, Node* predicate, Node* uncommon_proj, Node* control,
                                               IdealLoopTree* outer_loop, Node* input_proj);
  Node* clone_skeleton_predicate_bool(Node* iff, Node* new_init, Node* new_stride, Node* predicate, Node* uncommon_proj, Node* control,
                                      IdealLoopTree* outer_loop);
  bool skeleton_predicate_has_opaque(IfNode* iff);
  void update_main_loop_skeleton_predicates(Node* ctrl, CountedLoopNode* loop_head, Node* init, int stride_con);
  void insert_loop_limit_check(ProjNode* limit_check_proj, Node* cmp_limit, Node* bol);
#ifdef ASSERT
  bool only_has_infinite_loops();
#endif

  void log_loop_tree();

public:

  PhaseIterGVN &igvn() const { return _igvn; }

  static bool is_canonical_loop_entry(CountedLoopNode* cl);

  bool has_node( Node* n ) const {
    guarantee(n != NULL, "No Node.");
    return _nodes[n->_idx] != NULL;
  }
  // check if transform created new nodes that need _ctrl recorded
  Node *get_late_ctrl( Node *n, Node *early );
  Node *get_early_ctrl( Node *n );
  Node *get_early_ctrl_for_expensive(Node *n, Node* earliest);
  void set_early_ctrl(Node* n, bool update_body);
  void set_subtree_ctrl(Node* n, bool update_body);
  void set_ctrl( Node *n, Node *ctrl ) {
    assert( !has_node(n) || has_ctrl(n), "" );
    assert( ctrl->in(0), "cannot set dead control node" );
    assert( ctrl == find_non_split_ctrl(ctrl), "must set legal ctrl" );
    _nodes.map( n->_idx, (Node*)((intptr_t)ctrl + 1) );
  }
  // Set control and update loop membership
  void set_ctrl_and_loop(Node* n, Node* ctrl) {
    IdealLoopTree* old_loop = get_loop(get_ctrl(n));
    IdealLoopTree* new_loop = get_loop(ctrl);
    if (old_loop != new_loop) {
      if (old_loop->_child == NULL) old_loop->_body.yank(n);
      if (new_loop->_child == NULL) new_loop->_body.push(n);
    }
    set_ctrl(n, ctrl);
  }
  // Control nodes can be replaced or subsumed.  During this pass they
  // get their replacement Node in slot 1.  Instead of updating the block
  // location of all Nodes in the subsumed block, we lazily do it.  As we
  // pull such a subsumed block out of the array, we write back the final
  // correct block.
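  //
  // Illustrative summary (not from the original comment) of how the shared
  // _nodes[] side array is used by this pass:
  //   data node:     _nodes[n->_idx] = (Node*)((intptr_t)ctrl + 1)        (low bit set)
  //   CFG node:      _nodes[n->_idx] = (IdealLoopTree*)loop               (low bit clear)
  //   subsumed CFG:  _nodes[n->_idx] = (Node*)((intptr_t)replacement + 1)
  // has_ctrl() tests the low bit; get_ctrl() follows and caches any forwarding
  // pointers left behind by lazy_update().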
  Node *get_ctrl( Node *i ) {

    assert(has_node(i), "");
    Node *n = get_ctrl_no_update(i);
    _nodes.map( i->_idx, (Node*)((intptr_t)n + 1) );
    assert(has_node(i) && has_ctrl(i), "");
    assert(n == find_non_split_ctrl(n), "must return legal ctrl" );
    return n;
  }
  // true if CFG node d dominates CFG node n
  bool is_dominator(Node *d, Node *n);
  // return get_ctrl for a data node and self(n) for a CFG node
  Node* ctrl_or_self(Node* n) {
    if (has_ctrl(n))
      return get_ctrl(n);
    else {
      assert (n->is_CFG(), "must be a CFG node");
      return n;
    }
  }

  Node *get_ctrl_no_update_helper(Node *i) const {
    assert(has_ctrl(i), "should be control, not loop");
    return (Node*)(((intptr_t)_nodes[i->_idx]) & ~1);
  }

  Node *get_ctrl_no_update(Node *i) const {
    assert( has_ctrl(i), "" );
    Node *n = get_ctrl_no_update_helper(i);
    if (!n->in(0)) {
      // Skip dead CFG nodes
      do {
        n = get_ctrl_no_update_helper(n);
      } while (!n->in(0));
      n = find_non_split_ctrl(n);
    }
    return n;
  }

  // Check for loop being set
  // "n" must be a control node. Returns true if "n" is known to be in a loop.
  bool has_loop( Node *n ) const {
    assert(!has_node(n) || !has_ctrl(n), "");
    return has_node(n);
  }
  // Set loop
  void set_loop( Node *n, IdealLoopTree *loop ) {
    _nodes.map(n->_idx, (Node*)loop);
  }
  // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms.  Replace
  // the 'old_node' with 'new_node'.  Kill old-node.  Add a reference
  // from old_node to new_node to support the lazy update.  Reference
  // replaces loop reference, since that is not needed for dead node.
  void lazy_update(Node *old_node, Node *new_node) {
    assert(old_node != new_node, "no cycles please");
    // Re-use the side array slot for this node to provide the
    // forwarding pointer.
    _nodes.map(old_node->_idx, (Node*)((intptr_t)new_node + 1));
  }
  void lazy_replace(Node *old_node, Node *new_node) {
    _igvn.replace_node(old_node, new_node);
    lazy_update(old_node, new_node);
  }

private:

  // Place 'n' in some loop nest, where 'n' is a CFG node
  void build_loop_tree();
  int build_loop_tree_impl( Node *n, int pre_order );
  // Insert loop into the existing loop tree.  'innermost' is a leaf of the
  // loop tree, not the root.
  IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );

  // Place Data nodes in some loop nest
  void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late_post_work(Node* n, bool pinned);
  void build_loop_late_post(Node* n);
  void verify_strip_mined_scheduling(Node *n, Node* least);

  // Array of immediate dominance info for each CFG node indexed by node idx
private:
  uint _idom_size;
  Node **_idom;                  // Array of immediate dominators
  uint *_dom_depth;              // Used for fast LCA test
  GrowableArray<uint>* _dom_stk; // For recomputation of dom depth

  // build the loop tree and perform any requested optimizations
  void build_and_optimize(LoopOptsMode mode);

  // Dominators for the sea of nodes
  void Dominators();

  // Compute the Ideal Node to Loop mapping
  PhaseIdealLoop(PhaseIterGVN& igvn, LoopOptsMode mode) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _verify_me(nullptr),
    _verify_only(false),
    _dom_lca_tags(arena()),  // Thread::resource_area
    _nodes_required(UINT_MAX) {
    assert(mode != LoopOptsVerify, "wrong constructor to verify IdealLoop");
    build_and_optimize(mode);
  }

#ifndef PRODUCT
  // Verify that verify_me made the same decisions as a fresh run
  // or only verify that the graph is valid if verify_me is null.
  PhaseIdealLoop(PhaseIterGVN& igvn, const PhaseIdealLoop* verify_me = nullptr) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _verify_me(verify_me),
    _verify_only(verify_me == nullptr),
    _dom_lca_tags(arena()),  // Thread::resource_area
    _nodes_required(UINT_MAX) {
    build_and_optimize(LoopOptsVerify);
  }
#endif

public:
  Node* idom_no_update(Node* d) const {
    return idom_no_update(d->_idx);
  }

  Node* idom_no_update(uint didx) const {
    assert(didx < _idom_size, "oob");
    Node* n = _idom[didx];
    assert(n != NULL,"Bad immediate dominator info.");
    while (n->in(0) == NULL) { // Skip dead CFG nodes
      n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
      assert(n != NULL,"Bad immediate dominator info.");
    }
    return n;
  }

  Node *idom(Node* d) const {
    return idom(d->_idx);
  }

  Node *idom(uint didx) const {
    Node *n = idom_no_update(didx);
    _idom[didx] = n; // Lazily remove dead CFG nodes from table.
    return n;
  }

  uint dom_depth(Node* d) const {
    guarantee(d != NULL, "Null dominator info.");
    guarantee(d->_idx < _idom_size, "");
    return _dom_depth[d->_idx];
  }
  void set_idom(Node* d, Node* n, uint dom_depth);
  // Locally compute IDOM using dom_lca call
  Node *compute_idom( Node *region ) const;
  // Recompute dom_depth
  void recompute_dom_depth();

  // Is safept not required by an outer loop?
  bool is_deleteable_safept(Node* sfpt);

  // Replace parallel induction variable (parallel to trip counter)
  void replace_parallel_iv(IdealLoopTree *loop);

  Node *dom_lca( Node *n1, Node *n2 ) const {
    return find_non_split_ctrl(dom_lca_internal(n1, n2));
  }
  Node *dom_lca_internal( Node *n1, Node *n2 ) const;

  // Build and verify the loop tree without modifying the graph.  This
  // is useful to verify that all inputs properly dominate their uses.
  static void verify(PhaseIterGVN& igvn) {
#ifdef ASSERT
    ResourceMark rm;
    Compile::TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
    PhaseIdealLoop v(igvn);
#endif
  }

  // Recommended way to use PhaseIdealLoop.
  // Run PhaseIdealLoop in some mode and allocate a local scope for memory allocations.
  static void optimize(PhaseIterGVN &igvn, LoopOptsMode mode) {
    ResourceMark rm;
    PhaseIdealLoop v(igvn, mode);

    Compile* C = Compile::current();
    if (!C->failing()) {
      // Cleanup any modified bits
      igvn.optimize();

      v.log_loop_tree();
    }
  }

  // True if the method has at least 1 irreducible loop
  bool _has_irreducible_loops;

  // Per-Node transform
  virtual Node* transform(Node* n) { return NULL; }

  Node* loop_exit_control(Node* x, IdealLoopTree* loop);
  Node* loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob);
  Node* loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr);
  Node* loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xphi);
  PhiNode* loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealLoopTree* loop);

  bool is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_bt);

  void long_loop_replace_long_iv(Node* iv_to_replace, Node* inner_iv, Node* outer_phi, Node* inner_head);
  bool transform_long_counted_loop(IdealLoopTree* loop, Node_List &old_new);
#ifdef ASSERT
  bool convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* loop);
#endif
  void add_empty_predicate(Deoptimization::DeoptReason reason, Node* inner_head, IdealLoopTree* loop, SafePointNode* sfpt);
  SafePointNode* find_safepoint(Node* back_control, Node* x, IdealLoopTree* loop);
  IdealLoopTree* insert_outer_loop(IdealLoopTree* loop, LoopNode* outer_l, Node* outer_ift);
  IdealLoopTree* create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
                                               IdealLoopTree* loop, float cl_prob, float le_fcnt,
                                               Node*& entry_control, Node*& iffalse);

  Node* exact_limit( IdealLoopTree *loop );

  // Return a post-walked LoopNode
  IdealLoopTree *get_loop( Node *n ) const {
    // Dead nodes have no loop, so return the top level loop instead
    if (!has_node(n))  return _ltree_root;
    assert(!has_ctrl(n), "");
    return (IdealLoopTree*)_nodes[n->_idx];
  }

  IdealLoopTree* ltree_root() const { return _ltree_root; }

  // Is 'n' a (nested) member of 'loop'?
  int is_member( const IdealLoopTree *loop, Node *n ) const {
    return loop->is_member(get_loop(n)); }

  // This is the basic building block of the loop optimizations.  It clones an
  // entire loop body.  It makes an old_new loop body mapping; with this
  // mapping you can find the new-loop equivalent to an old-loop node.  All
  // new-loop nodes are exactly equal to their old-loop counterparts, all
  // edges are the same.  All exits from the old-loop now have a RegionNode
  // that merges the equivalent new-loop path.  This is true even for the
  // normal "loop-exit" condition.  All uses of loop-invariant old-loop values
  // now come from (one or more) Phis that merge their new-loop equivalents.
  // Parameter side_by_side_idom:
  //   When side_by_side_idom is NULL, the dominator tree is constructed for
  //   the clone loop to dominate the original.  Used in construction of
  //   pre-main-post loop sequence.
  //   When nonnull, the clone and original are side-by-side, both are
  //   dominated by the passed in side_by_side_idom node.  Used in
  //   construction of unswitched loops.
1212 enum CloneLoopMode {
1213 IgnoreStripMined = 0, // Only clone inner strip mined loop
1214 CloneIncludesStripMined = 1, // clone both inner and outer strip mined loops
1215 ControlAroundStripMined = 2 // Only clone inner strip mined loop,
1216 // result control flow branches
1217 // either to inner clone or outer
1218 // strip mined loop.
1219 };
1220 void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
1221 CloneLoopMode mode, Node* side_by_side_idom = NULL);
1222 void clone_loop_handle_data_uses(Node* old, Node_List &old_new,
1223 IdealLoopTree* loop, IdealLoopTree* companion_loop,
1224 Node_List*& split_if_set, Node_List*& split_bool_set,
1225 Node_List*& split_cex_set, Node_List& worklist,
1226 uint new_counter, CloneLoopMode mode);
1227 void clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
1228 IdealLoopTree* outer_loop, int dd, Node_List &old_new,
1229 Node_List& extra_data_nodes);
1230
1231 // If we got the effect of peeling, either by actually peeling or by
1232 // making a pre-loop which must execute at least once, we can remove
1233 // all loop-invariant dominated tests in the main body.
1234 void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new );
1235
1236 // Generate code to do a loop peel for the given loop (and body).
1237 // old_new is a temp array.
1238 void do_peeling( IdealLoopTree *loop, Node_List &old_new );
1239
1240 // Add pre and post loops around the given loop. These loops are used
1241 // during RCE, unrolling and aligning loops.
1242 void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );
1243
1244 // Add post loop after the given loop.
1245 Node *insert_post_loop(IdealLoopTree *loop, Node_List &old_new,
1246 CountedLoopNode *main_head, CountedLoopEndNode *main_end,
1247 Node *incr, Node *limit, CountedLoopNode *&post_head);
1248
1249 // Add an RCE'd post loop which we will multi-version adapt for run time test path usage
1250 void insert_scalar_rced_post_loop( IdealLoopTree *loop, Node_List &old_new );
1251
1252 // Add a vector post loop between a vector main loop and the current post loop
1253 void insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new);
  // If Node n lives in the back_ctrl block, we clone a private version of n
  // in preheader_ctrl block and return that, otherwise return n.
  Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones );

  // Take steps to maximally unroll the loop. Peel any odd iterations, then
  // unroll to do double iterations. The next round of major loop transforms
  // will repeat till the doubled loop body does all remaining iterations in 1
  // pass.
  void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );

  // Unroll the loop body one step - make each trip do 2 iterations.
  void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );

  // Mark vector reduction candidates before loop unrolling
  void mark_reductions( IdealLoopTree *loop );

  // Return true if exp is a constant times an induction var
  bool is_scaled_iv(Node* exp, Node* iv, int* p_scale);

  // Return true if exp is a scaled induction var plus (or minus) constant
  bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
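  // For illustration only (not an exhaustive spec): with exp representing the
  // expression 4*iv + 8, is_scaled_iv_plus_offset(exp, iv, &scale, &offset)
  // is expected to return true with scale == 4 and offset set to the node for
  // the constant 8; is_scaled_iv() handles the simpler 4*iv shape.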

  // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
  ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, Deoptimization::DeoptReason reason,
                                        int opcode, bool if_cont_is_true_proj = true);

  void register_control(Node* n, IdealLoopTree *loop, Node* pred, bool update_body = true);

  static Node* skip_all_loop_predicates(Node* entry);
  static Node* skip_loop_predicates(Node* entry);

  // Find a good location to insert a predicate
  static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason);
  // Find a predicate
  static Node* find_predicate(Node* entry);
  // Construct a range check for a predicate if
  BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl,
                         int scale, Node* offset,
                         Node* init, Node* limit, jint stride,
                         Node* range, bool upper, bool &overflow);

  // Implementation of the loop predication to promote checks outside the loop
  bool loop_predication_impl(IdealLoopTree *loop);
  bool loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* proj, ProjNode *predicate_proj,
                                    CountedLoopNode *cl, ConNode* zero, Invariance& invar,
                                    Deoptimization::DeoptReason reason);
  bool loop_predication_should_follow_branches(IdealLoopTree *loop, ProjNode *predicate_proj, float& loop_trip_cnt);
  void loop_predication_follow_branches(Node *c, IdealLoopTree *loop, float loop_trip_cnt,
                                        PathFrequency& pf, Node_Stack& stack, VectorSet& seen,
                                        Node_List& if_proj_list);
  ProjNode* insert_initial_skeleton_predicate(IfNode* iff, IdealLoopTree *loop,
                                              ProjNode* proj, ProjNode *predicate_proj,
                                              ProjNode* upper_bound_proj,
                                              int scale, Node* offset,
                                              Node* init, Node* limit, jint stride,
                                              Node* rng, bool& overflow,
                                              Deoptimization::DeoptReason reason);
  Node* add_range_check_predicate(IdealLoopTree* loop, CountedLoopNode* cl,
                                  Node* predicate_proj, int scale_con, Node* offset,
                                  Node* limit, jint stride_con, Node* value);

  // Helper function to collect predicates for eliminating the useless ones
  void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
  void eliminate_useless_predicates();

  // Change the control input of expensive nodes to allow commoning by
  // IGVN when it is guaranteed to not result in a more frequent
  // execution of the expensive node. Return true if progress.
  bool process_expensive_nodes();

  // Check whether node has become unreachable
  bool is_node_unreachable(Node *n) const {
    return !has_node(n) || n->is_unreachable(_igvn);
  }

  // Eliminate range-checks and other trip-counter vs loop-invariant tests.
  int do_range_check( IdealLoopTree *loop, Node_List &old_new );

  // Check to see if do_range_check(...) cleaned the main loop of range-checks
  void has_range_checks(IdealLoopTree *loop);

  // Process post loops which have range checks and try to build a multi-version
  // guard to safely determine if we can execute the post loop which was RCE'd.
  bool multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoopTree *legacy_loop);

  // Cause the RCE'd post loop to be optimized away; this happens if we cannot
  // complete multiversioning.
  void poison_rce_post_loop(IdealLoopTree *rce_loop);

  // Create a slow version of the loop by cloning the loop
  // and inserting an if to select fast-slow versions.
  ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
                                        Node_List &old_new,
                                        int opcode,
                                        CloneLoopMode mode);

  // Clone a loop and return the clone head (clone_loop_head).
  // Added nodes include int(1), int(0) - disconnected, If, IfTrue, IfFalse.
  // This routine was created for usage in CountedLoopReserveKit.
  //
  //    int(1) -> If -> IfTrue -> original_loop_head
  //                |
  //                V
  //           IfFalse -> clone_loop_head (returned by this function)
  //
  LoopNode* create_reserve_version_of_loop(IdealLoopTree *loop, CountedLoopReserveKit* lk);
  // Clone loop with an invariant test (that does not exit) and
  // insert a clone of the test that selects which version to
  // execute.
  void do_unswitching (IdealLoopTree *loop, Node_List &old_new);

  // Find candidate "if" for unswitching
  IfNode* find_unswitching_candidate(const IdealLoopTree *loop) const;

  // Range Check Elimination uses this function!
  // Constrain the main loop iterations so the affine function:
  //    low_limit <= scale_con * I + offset < upper_limit
  // always holds true. That is, either increase the number of iterations in
  // the pre-loop or the post-loop until the condition holds true in the main
  // loop. Scale_con, offset and limit are all loop invariant.
  void add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit);
  // Helper function for add_constraint().
  Node* adjust_limit(bool reduce, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round);
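  // Worked example (illustrative only): for an access a[scale_con*i + offset]
  // guarded by 0 <= scale_con*i + offset < a.length, add_constraint() raises
  // the pre-loop limit until scale_con*i + offset >= low_limit (here 0) holds
  // on entry to the main loop, and lowers the main-loop limit so that
  // scale_con*i + offset < upper_limit (here a.length) holds for every main
  // loop iteration; the iterations peeled off either end execute in the
  // pre- and post-loops, which keep their range checks.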

  // Partially peel loop up through last_peel node.
  bool partial_peel( IdealLoopTree *loop, Node_List &old_new );

  // Create a scheduled list of nodes control dependent on ctrl set.
  void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
  // Has a use in the vector set
  bool has_use_in_set( Node* n, VectorSet& vset );
  // Has use internal to the vector set (i.e. not in a phi at the loop head)
  bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
  // Clone "n" for uses that are outside of loop
  int clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
  // Clone "n" for special uses that are in the not_peeled region
  void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                          VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
  // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
  void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
#ifdef ASSERT
  // Validate the loop partition sets: peel and not_peel
  bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );
  // Ensure that uses outside of loop are of the right form
  bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                 uint orig_exit_idx, uint clone_exit_idx);
  bool is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx);
#endif

  // Returns the nonzero constant stride if the if-node is a possible iv test (otherwise returns zero).
  int stride_of_possible_iv( Node* iff );
  bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; }
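  // For example (illustrative): if iff tests "i < limit" for a trip counter i
  // that is advanced by AddI(i, ConI(4)) on the backedge, stride_of_possible_iv()
  // would return 4, so is_possible_iv_test(iff) is true.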
  // Return the (unique) control output node that's in the loop (if it exists).
  Node* stay_in_loop( Node* n, IdealLoopTree *loop);
  // Insert a signed compare loop exit cloned from an unsigned compare.
  IfNode* insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop);
  void remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop);
  // Utility to register node "n" with PhaseIdealLoop
  void register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth);
  // Utility to create an if-projection
  ProjNode* proj_clone(ProjNode* p, IfNode* iff);
  // Force the iff control output to be the live_proj
  Node* short_circuit_if(IfNode* iff, ProjNode* live_proj);
  // Insert a region before an if projection
  RegionNode* insert_region_before_proj(ProjNode* proj);
  // Insert a new if before an if projection
  ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj);

  // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
  // "Nearly" because all Nodes have been cloned from the original in the loop,
  // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
  // through the Phi recursively, and return a Bool.
  Node *clone_iff( PhiNode *phi, IdealLoopTree *loop );
  CmpNode *clone_bool( PhiNode *phi, IdealLoopTree *loop );


  // Rework addressing expressions to get the most loop-invariant stuff
  // moved out. We'd like to do all associative operators, but it's especially
  // important (common) to do address expressions.
  Node *remix_address_expressions( Node *n );

  // Convert add to muladd to generate MuladdS2I under certain criteria
  Node *convert_add_to_muladd(Node *n);

  // Attempt to use a conditional move instead of a phi/branch
  Node *conditional_move( Node *n );

  // Reorganize offset computations to lower register pressure.
  // Mostly prevent loop-fallout uses of the pre-incremented trip counter
  // (which are then alive with the post-incremented trip counter
  // forcing an extra register move).
  void reorg_offsets( IdealLoopTree *loop );

  // Check for aggressive application of 'split-if' optimization,
  // using basic block level info.
  void split_if_with_blocks( VectorSet &visited, Node_Stack &nstack);
  Node *split_if_with_blocks_pre( Node *n );
  void split_if_with_blocks_post( Node *n );
  Node *has_local_phi_input( Node *n );
  // Mark an IfNode as being dominated by a prior test,
  // without actually altering the CFG (and hence IDOM info).
  void dominated_by( Node *prevdom, Node *iff, bool flip = false, bool exclude_loop_predicate = false );

  // Split Node 'n' through merge point
  Node *split_thru_region( Node *n, Node *region );
  // Split Node 'n' through merge point if there is enough win.
  Node *split_thru_phi( Node *n, Node *region, int policy );
  // Found an If getting its condition-code input from a Phi in the
  // same block. Split thru the Region.
  void do_split_if( Node *iff );

  // Conversion of fill/copy patterns into intrinsic versions
  bool do_intrinsify_fill();
  bool intrinsify_fill(IdealLoopTree* lpt);
  bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
                       Node*& shift, Node*& offset);

private:
  // Return a type based on condition control flow
  const TypeInt* filtered_type( Node *n, Node* n_ctrl);
  const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
  // Helpers for filtered type
  const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);

  // Helper functions
  Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
  Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
  void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
  bool split_up( Node *n, Node *blk1, Node *blk2 );
  void sink_use( Node *use, Node *post_loop );
  Node *place_near_use( Node *useblock ) const;
  Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
  void try_move_store_after_loop(Node* n);
  bool identical_backtoback_ifs(Node *n);
  bool can_split_if(Node *n_ctrl);

  // Determine if a method is too big for a/another round of split-if, based on
  // a magic (approximate) ratio derived from the equally magic constant 35000,
  // previously used for this purpose (but without relating to the node limit).
  bool must_throttle_split_if() {
    uint threshold = C->max_node_limit() * 2 / 5;
    return C->live_nodes() > threshold;
  }
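  // For instance, assuming a MaxNodeLimit of 80000 (an assumed default, not a
  // value taken from this file), the threshold works out to 80000 * 2 / 5 =
  // 32000 live nodes, which is in the same ballpark as the old constant 35000.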

  // A simplistic node request tracking mechanism, where
  //   = UINT_MAX   Request not valid or made final.
  //   < UINT_MAX   Nodes currently requested (estimate).
  uint _nodes_required;

  enum { REQUIRE_MIN = 70 };

  uint nodes_required() const { return _nodes_required; }

  // Given the _currently_ available number of nodes, check whether there is
  // "room" for an additional request or not, considering the already required
  // number of nodes. Return TRUE if the new request is exceeding the node
  // budget limit, otherwise return FALSE. Note that this interpretation will
  // act pessimistically on additional requests when new nodes have already
  // been generated since the 'begin'. This behaviour fits with the intention
  // that node estimates/requests should be made upfront.
  bool exceeding_node_budget(uint required = 0) {
    assert(C->live_nodes() < C->max_node_limit(), "sanity");
    uint available = C->max_node_limit() - C->live_nodes();
    return available < required + _nodes_required + REQUIRE_MIN;
  }

  uint require_nodes(uint require, uint minreq = REQUIRE_MIN) {
    precond(require > 0);
    _nodes_required += MAX2(require, minreq);
    return _nodes_required;
  }

  bool may_require_nodes(uint require, uint minreq = REQUIRE_MIN) {
    return !exceeding_node_budget(require) && require_nodes(require, minreq) > 0;
  }
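  // Typical call pattern in a transform (illustrative sketch; the estimate
  // name below is hypothetical, not an existing helper):
  //
  //   uint estimated_growth = ...;               // expected new-node count
  //   if (!may_require_nodes(estimated_growth)) {
  //     return;                                  // node budget exhausted, skip
  //   }
  //   ... perform the transformation ...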

  uint require_nodes_begin() {
    assert(_nodes_required == UINT_MAX, "Bad state (begin).");
    _nodes_required = 0;
    return C->live_nodes();
  }

  // When a node request is final, optionally check that the requested number
  // of nodes was reasonably correct with respect to the number of new nodes
  // introduced since the last 'begin'. Always check that we have not exceeded
  // the maximum node limit.
  void require_nodes_final(uint live_at_begin, bool check_estimate) {
    assert(_nodes_required < UINT_MAX, "Bad state (final).");

#ifdef ASSERT
    if (check_estimate) {
      // Check that the node budget request was not off by too much (x2).
      // Should this be the case we _surely_ need to improve the estimates
      // used in our budget calculations.
      if (C->live_nodes() - live_at_begin > 2 * _nodes_required) {
        log_info(compilation)("Bad node estimate: actual = %d >> request = %d",
                              C->live_nodes() - live_at_begin, _nodes_required);
      }
    }
#endif
    // Assert that we have stayed within the node budget limit.
    assert(C->live_nodes() < C->max_node_limit(),
           "Exceeding node budget limit: %d + %d > %d (request = %d)",
           C->live_nodes() - live_at_begin, live_at_begin,
           C->max_node_limit(), _nodes_required);

    _nodes_required = UINT_MAX;
  }
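  // Note: the begin/final pair is normally driven by the AutoNodeBudget RAII
  // helper declared further below, whose constructor calls require_nodes_begin()
  // and whose destructor calls require_nodes_final(); transforms themselves
  // usually only interact with may_require_nodes().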

  // Clone loop predicates to slow and fast loop when unswitching a loop
  void clone_predicates_to_unswitched_loop(IdealLoopTree* loop, const Node_List& old_new, ProjNode*& iffast_pred, ProjNode*& ifslow_pred);
  ProjNode* clone_predicate_to_unswitched_loop(ProjNode* predicate_proj, Node* new_entry, Deoptimization::DeoptReason reason);
  void clone_skeleton_predicates_to_unswitched_loop(IdealLoopTree* loop, const Node_List& old_new, Deoptimization::DeoptReason reason,
                                                    ProjNode* old_predicate_proj, ProjNode* iffast_pred, ProjNode* ifslow_pred);
  ProjNode* clone_skeleton_predicate_for_unswitched_loops(Node* iff, ProjNode* predicate, Node* uncommon_proj, Deoptimization::DeoptReason reason,
                                                          ProjNode* output_proj, IdealLoopTree* loop);
  void check_created_predicate_for_unswitching(const Node* new_entry) const PRODUCT_RETURN;

  bool _created_loop_node;
#ifdef ASSERT
  void dump_real_LCA(Node* early, Node* wrong_lca);
  bool check_idom_chains_intersection(const Node* n, uint& idom_idx_new, uint& idom_idx_other, const Node_List* nodes_seen) const;
#endif

public:
  void set_created_loop_node() { _created_loop_node = true; }
  bool created_loop_node() { return _created_loop_node; }
  void register_new_node(Node* n, Node* blk);

#ifdef ASSERT
  void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
#endif

#ifndef PRODUCT
  void dump() const;
  void dump_idom(Node* n) const;
  void dump(IdealLoopTree* loop, uint rpo_idx, Node_List &rpo_list) const;
  void verify() const;          // Major slow  :-)
  void verify_compare(Node* n, const PhaseIdealLoop* loop_verify, VectorSet &visited) const;
  IdealLoopTree* get_loop_idx(Node* n) const {
    // Dead nodes have no loop, so return the top level loop instead
    return _nodes[n->_idx] ? (IdealLoopTree*)_nodes[n->_idx] : _ltree_root;
  }
  // Print some stats
  static void print_statistics();
  static int _loop_invokes;     // Count of PhaseIdealLoop invokes
  static int _loop_work;        // Sum of PhaseIdealLoop x _unique
  static volatile int _long_loop_candidates;
  static volatile int _long_loop_nests;
  static volatile int _long_loop_counted_loops;
#endif

  void rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const;

  void check_long_counted_loop(IdealLoopTree* loop, Node* x) NOT_DEBUG_RETURN;

  LoopNode* create_inner_head(IdealLoopTree* loop, LongCountedLoopNode* head, LongCountedLoopEndNode* exit_test);

  bool is_safe_load_ctrl(Node* ctrl);
};


class AutoNodeBudget : public StackObj
{
public:
  enum budget_check_t { BUDGET_CHECK, NO_BUDGET_CHECK };

  AutoNodeBudget(PhaseIdealLoop* phase, budget_check_t chk = BUDGET_CHECK)
    : _phase(phase),
      _check_at_final(chk == BUDGET_CHECK),
      _nodes_at_begin(0)
  {
    precond(_phase != NULL);

    _nodes_at_begin = _phase->require_nodes_begin();
  }

  ~AutoNodeBudget() {
#ifndef PRODUCT
    if (TraceLoopOpts) {
      uint request = _phase->nodes_required();
      uint delta   = _phase->C->live_nodes() - _nodes_at_begin;

      if (request < delta) {
        tty->print_cr("Exceeding node budget: %d < %d", request, delta);
      } else {
        uint const REQUIRE_MIN = PhaseIdealLoop::REQUIRE_MIN;
        // Identify the worst estimates as "poor" ones.
        if (request > REQUIRE_MIN && delta > 0) {
          if ((delta >  REQUIRE_MIN && request >  3 * delta) ||
              (delta <= REQUIRE_MIN && request > 10 * delta)) {
            tty->print_cr("Poor node estimate: %d >> %d", request, delta);
          }
        }
      }
    }
#endif // PRODUCT
    _phase->require_nodes_final(_nodes_at_begin, _check_at_final);
  }

private:
  PhaseIdealLoop* _phase;
  bool _check_at_final;
  uint _nodes_at_begin;
};
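
// Illustrative usage sketch (names of the transform and its estimate are
// hypothetical, not taken from this file):
//
//   void PhaseIdealLoop::some_transform(IdealLoopTree* loop) {
//     AutoNodeBudget nb(this);            // records live node count at entry
//     if (!may_require_nodes(estimate)) {
//       return;                           // budget exhausted, bail out early
//     }
//     ... mutate the graph ...
//   }                                     // ~AutoNodeBudget() checks the estimate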


// This kit may be used for making a reserved copy of a loop before this loop
// undergoes non-reversible changes.
//
// Function create_reserve() creates a reserved copy (clone) of the loop.
// The reserved copy is created by calling
// PhaseIdealLoop::create_reserve_version_of_loop - see there how
// the original and reserved loops are connected in the outer graph.
// If create_reserve succeeded, it returns 'true' and _has_reserved is set to 'true'.
//
// By default the reserved copy (clone) of the loop is created as dead code - it is
// dominated in the outer loop by this node chain:
//   intcon(1)->If->IfFalse->reserved_copy.
// The original loop is dominated by the same node chain, but through the IfTrue
// projection:
//   intcon(0)->If->IfTrue->original_loop.
//
// In this implementation of CountedLoopReserveKit, the ctor calls create_reserve()
// and the dtor checks the _use_new value.
// If _use_new == false, the dtor "switches" control to the reserved copy of the loop
// by simply replacing node intcon(1) with node intcon(0).
//
// Here is a proposed example of usage (see also SuperWord::output in superword.cpp).
//
// void CountedLoopReserveKit_example()
// {
//   CountedLoopReserveKit lrk(phase, lpt, DoReserveCopy /*= true*/); // create local object
//   if (DoReserveCopy && !lrk.has_reserved()) {
//     return; // failed to create reserved loop copy
//   }
//   ...
//   // something is wrong, switch to original loop
//   if (something_is_wrong) return; // ~CountedLoopReserveKit makes the switch
//   ...
//   // everything worked ok, return with the newly modified loop
//   lrk.use_new();
//   return; // ~CountedLoopReserveKit does nothing once use_new() was called
// }
//
// Keep in mind that, by default, if create_reserve() is not followed by use_new()
// the dtor will "switch to the original" loop.
// NOTE: If you modify outside of the original loop this class is no help.
//
class CountedLoopReserveKit {
private:
  PhaseIdealLoop* _phase;
  IdealLoopTree*  _lpt;
  LoopNode*       _lp;
  IfNode*         _iff;
  LoopNode*       _lp_reserved;
  bool            _has_reserved;
  bool            _use_new;
  const bool      _active; // may be set to false in ctor, then the object is dummy

public:
  CountedLoopReserveKit(PhaseIdealLoop* phase, IdealLoopTree *loop, bool active);
  ~CountedLoopReserveKit();
  void use_new()                {_use_new = true;}
  void set_iff(IfNode* x)       {_iff = x;}
  bool has_reserved()     const { return _active && _has_reserved;}
private:
  bool create_reserve();
};// class CountedLoopReserveKit

inline Node* IdealLoopTree::tail() {
  // Handle lazy update of _tail field.
  if (_tail->in(0) == NULL) {
    _tail = _phase->get_ctrl(_tail);
  }
  return _tail;
}

inline Node* IdealLoopTree::head() {
  // Handle lazy update of _head field.
  if (_head->in(0) == NULL) {
    _head = _phase->get_ctrl(_head);
  }
  return _head;
}

// Iterate over the loop tree using a preorder, left-to-right traversal.
//
// Example that visits all counted loops from within PhaseIdealLoop:
//
//   for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
//     IdealLoopTree* lpt = iter.current();
//     if (!lpt->is_counted()) continue;
//     ...
//   }
class LoopTreeIterator : public StackObj {
private:
  IdealLoopTree* _root;
  IdealLoopTree* _curnt;

public:
  LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}

  bool done() { return _curnt == NULL; }       // Finished iterating?

  void next();                                 // Advance to next loop tree

  IdealLoopTree* current() { return _curnt; }  // Return current value of iterator.
};

#endif // SHARE_OPTO_LOOPNODE_HPP