/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"
44
45 class RegMask;
46 // #include "phase.hpp"
47 class PhaseTransform;
48 class PhaseGVN;
49
50 // Arena we are currently building Nodes in
51 const uint Node::NotAMachineReg = 0xffff0000;
52
53 #ifndef PRODUCT
54 extern int nodes_created;
55 #endif
56 #ifdef __clang__
57 #pragma clang diagnostic push
58 #pragma GCC diagnostic ignored "-Wuninitialized"
59 #endif
60
61 #ifdef ASSERT
62
63 //-------------------------- construct_node------------------------------------
64 // Set a breakpoint here to identify where a particular node index is built.
verify_construction()65 void Node::verify_construction() {
66 _debug_orig = NULL;
67 int old_debug_idx = Compile::debug_idx();
68 int new_debug_idx = old_debug_idx+1;
69 if (new_debug_idx > 0) {
70 // Arrange that the lowest five decimal digits of _debug_idx
71 // will repeat those of _idx. In case this is somehow pathological,
72 // we continue to assign negative numbers (!) consecutively.
73 const int mod = 100000;
74 int bump = (int)(_idx - new_debug_idx) % mod;
75 if (bump < 0) bump += mod;
76 assert(bump >= 0 && bump < mod, "");
77 new_debug_idx += bump;
78 }
79 Compile::set_debug_idx(new_debug_idx);
80 set_debug_idx( new_debug_idx );
81 assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
82 assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit");
83 if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
84 tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
85 BREAKPOINT;
86 }
87 #if OPTO_DU_ITERATOR_ASSERT
88 _last_del = NULL;
89 _del_tick = 0;
90 #endif
91 _hash_lock = 0;
92 }
93
94
95 // #ifdef ASSERT ...
96
97 #if OPTO_DU_ITERATOR_ASSERT
sample(const Node * node)98 void DUIterator_Common::sample(const Node* node) {
99 _vdui = VerifyDUIterators;
100 _node = node;
101 _outcnt = node->_outcnt;
102 _del_tick = node->_del_tick;
103 _last = NULL;
104 }
105
verify(const Node * node,bool at_end_ok)106 void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
107 assert(_node == node, "consistent iterator source");
108 assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
109 }
110
verify_resync()111 void DUIterator_Common::verify_resync() {
112 // Ensure that the loop body has just deleted the last guy produced.
113 const Node* node = _node;
114 // Ensure that at least one copy of the last-seen edge was deleted.
115 // Note: It is OK to delete multiple copies of the last-seen edge.
116 // Unfortunately, we have no way to verify that all the deletions delete
117 // that same edge. On this point we must use the Honor System.
118 assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
119 assert(node->_last_del == _last, "must have deleted the edge just produced");
120 // We liked this deletion, so accept the resulting outcnt and tick.
121 _outcnt = node->_outcnt;
122 _del_tick = node->_del_tick;
123 }
124
reset(const DUIterator_Common & that)125 void DUIterator_Common::reset(const DUIterator_Common& that) {
126 if (this == &that) return; // ignore assignment to self
127 if (!_vdui) {
128 // We need to initialize everything, overwriting garbage values.
129 _last = that._last;
130 _vdui = that._vdui;
131 }
132 // Note: It is legal (though odd) for an iterator over some node x
133 // to be reassigned to iterate over another node y. Some doubly-nested
134 // progress loops depend on being able to do this.
135 const Node* node = that._node;
136 // Re-initialize everything, except _last.
137 _node = node;
138 _outcnt = node->_outcnt;
139 _del_tick = node->_del_tick;
140 }
141
sample(const Node * node)142 void DUIterator::sample(const Node* node) {
143 DUIterator_Common::sample(node); // Initialize the assertion data.
144 _refresh_tick = 0; // No refreshes have happened, as yet.
145 }
146
verify(const Node * node,bool at_end_ok)147 void DUIterator::verify(const Node* node, bool at_end_ok) {
148 DUIterator_Common::verify(node, at_end_ok);
149 assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
150 }
151
verify_increment()152 void DUIterator::verify_increment() {
153 if (_refresh_tick & 1) {
154 // We have refreshed the index during this loop.
155 // Fix up _idx to meet asserts.
156 if (_idx > _outcnt) _idx = _outcnt;
157 }
158 verify(_node, true);
159 }
160
verify_resync()161 void DUIterator::verify_resync() {
162 // Note: We do not assert on _outcnt, because insertions are OK here.
163 DUIterator_Common::verify_resync();
164 // Make sure we are still in sync, possibly with no more out-edges:
165 verify(_node, true);
166 }
167
reset(const DUIterator & that)168 void DUIterator::reset(const DUIterator& that) {
169 if (this == &that) return; // self assignment is always a no-op
170 assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
171 assert(that._idx == 0, "assign only the result of Node::outs()");
172 assert(_idx == that._idx, "already assigned _idx");
173 if (!_vdui) {
174 // We need to initialize everything, overwriting garbage values.
175 sample(that._node);
176 } else {
177 DUIterator_Common::reset(that);
178 if (_refresh_tick & 1) {
179 _refresh_tick++; // Clear the "was refreshed" flag.
180 }
181 assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
182 }
183 }
184
refresh()185 void DUIterator::refresh() {
186 DUIterator_Common::sample(_node); // Re-fetch assertion data.
187 _refresh_tick |= 1; // Set the "was refreshed" flag.
188 }
189
verify_finish()190 void DUIterator::verify_finish() {
191 // If the loop has killed the node, do not require it to re-run.
192 if (_node->_outcnt == 0) _refresh_tick &= ~1;
193 // If this assert triggers, it means that a loop used refresh_out_pos
194 // to re-synch an iteration index, but the loop did not correctly
195 // re-run itself, using a "while (progress)" construct.
196 // This iterator enforces the rule that you must keep trying the loop
197 // until it "runs clean" without any need for refreshing.
198 assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
199 }
200
201
verify(const Node * node,bool at_end_ok)202 void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
203 DUIterator_Common::verify(node, at_end_ok);
204 Node** out = node->_out;
205 uint cnt = node->_outcnt;
206 assert(cnt == _outcnt, "no insertions allowed");
207 assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
208 // This last check is carefully designed to work for NO_OUT_ARRAY.
209 }
210
verify_limit()211 void DUIterator_Fast::verify_limit() {
212 const Node* node = _node;
213 verify(node, true);
214 assert(_outp == node->_out + node->_outcnt, "limit still correct");
215 }
216
verify_resync()217 void DUIterator_Fast::verify_resync() {
218 const Node* node = _node;
219 if (_outp == node->_out + _outcnt) {
220 // Note that the limit imax, not the pointer i, gets updated with the
221 // exact count of deletions. (For the pointer it's always "--i".)
222 assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
223 // This is a limit pointer, with a name like "imax".
224 // Fudge the _last field so that the common assert will be happy.
225 _last = (Node*) node->_last_del;
226 DUIterator_Common::verify_resync();
227 } else {
228 assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
229 // A normal internal pointer.
230 DUIterator_Common::verify_resync();
231 // Make sure we are still in sync, possibly with no more out-edges:
232 verify(node, true);
233 }
234 }
235
verify_relimit(uint n)236 void DUIterator_Fast::verify_relimit(uint n) {
237 const Node* node = _node;
238 assert((int)n > 0, "use imax -= n only with a positive count");
239 // This must be a limit pointer, with a name like "imax".
240 assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
241 // The reported number of deletions must match what the node saw.
242 assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
243 // Fudge the _last field so that the common assert will be happy.
244 _last = (Node*) node->_last_del;
245 DUIterator_Common::verify_resync();
246 }
247
reset(const DUIterator_Fast & that)248 void DUIterator_Fast::reset(const DUIterator_Fast& that) {
249 assert(_outp == that._outp, "already assigned _outp");
250 DUIterator_Common::reset(that);
251 }
252
verify(const Node * node,bool at_end_ok)253 void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
254 // at_end_ok means the _outp is allowed to underflow by 1
255 _outp += at_end_ok;
256 DUIterator_Fast::verify(node, at_end_ok); // check _del_tick, etc.
257 _outp -= at_end_ok;
258 assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
259 }
260
verify_limit()261 void DUIterator_Last::verify_limit() {
262 // Do not require the limit address to be resynched.
263 //verify(node, true);
264 assert(_outp == _node->_out, "limit still correct");
265 }
266
verify_step(uint num_edges)267 void DUIterator_Last::verify_step(uint num_edges) {
268 assert((int)num_edges > 0, "need non-zero edge count for loop progress");
269 _outcnt -= num_edges;
270 _del_tick += num_edges;
271 // Make sure we are still in sync, possibly with no more out-edges:
272 const Node* node = _node;
273 verify(node, true);
274 assert(node->_last_del == _last, "must have deleted the edge just produced");
275 }
276
277 #endif //OPTO_DU_ITERATOR_ASSERT
278
279
280 #endif //ASSERT
281
282
283 // This constant used to initialize _out may be any non-null value.
284 // The value NULL is reserved for the top node only.
285 #define NO_OUT_ARRAY ((Node**)-1)
286
287 // Out-of-line code from node constructors.
288 // Executed only when extra debug info. is being passed around.
init_node_notes(Compile * C,int idx,Node_Notes * nn)289 static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
290 C->set_node_notes_at(idx, nn);
291 }
292
293 // Shared initialization code.
Init(int req)294 inline int Node::Init(int req) {
295 Compile* C = Compile::current();
296 int idx = C->next_unique();
297
298 // Allocate memory for the necessary number of edges.
299 if (req > 0) {
300 // Allocate space for _in array to have double alignment.
301 _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
302 }
303 // If there are default notes floating around, capture them:
304 Node_Notes* nn = C->default_node_notes();
305 if (nn != NULL) init_node_notes(C, idx, nn);
306
307 // Note: At this point, C is dead,
308 // and we begin to initialize the new Node.
309
310 _cnt = _max = req;
311 _outcnt = _outmax = 0;
312 _class_id = Class_Node;
313 _flags = 0;
314 _out = NO_OUT_ARRAY;
315 return idx;
316 }
317
318 //------------------------------Node-------------------------------------------
319 // Create a Node, with a given number of required edges.
Node(uint req)320 Node::Node(uint req)
321 : _idx(Init(req))
322 #ifdef ASSERT
323 , _parse_idx(_idx)
324 #endif
325 {
326 assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
327 debug_only( verify_construction() );
328 NOT_PRODUCT(nodes_created++);
329 if (req == 0) {
330 _in = NULL;
331 } else {
332 Node** to = _in;
333 for(uint i = 0; i < req; i++) {
334 to[i] = NULL;
335 }
336 }
337 }
338
339 //------------------------------Node-------------------------------------------
Node(Node * n0)340 Node::Node(Node *n0)
341 : _idx(Init(1))
342 #ifdef ASSERT
343 , _parse_idx(_idx)
344 #endif
345 {
346 debug_only( verify_construction() );
347 NOT_PRODUCT(nodes_created++);
348 assert( is_not_dead(n0), "can not use dead node");
349 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
350 }
351
352 //------------------------------Node-------------------------------------------
Node(Node * n0,Node * n1)353 Node::Node(Node *n0, Node *n1)
354 : _idx(Init(2))
355 #ifdef ASSERT
356 , _parse_idx(_idx)
357 #endif
358 {
359 debug_only( verify_construction() );
360 NOT_PRODUCT(nodes_created++);
361 assert( is_not_dead(n0), "can not use dead node");
362 assert( is_not_dead(n1), "can not use dead node");
363 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
364 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
365 }
366
367 //------------------------------Node-------------------------------------------
Node(Node * n0,Node * n1,Node * n2)368 Node::Node(Node *n0, Node *n1, Node *n2)
369 : _idx(Init(3))
370 #ifdef ASSERT
371 , _parse_idx(_idx)
372 #endif
373 {
374 debug_only( verify_construction() );
375 NOT_PRODUCT(nodes_created++);
376 assert( is_not_dead(n0), "can not use dead node");
377 assert( is_not_dead(n1), "can not use dead node");
378 assert( is_not_dead(n2), "can not use dead node");
379 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
380 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
381 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
382 }
383
384 //------------------------------Node-------------------------------------------
Node(Node * n0,Node * n1,Node * n2,Node * n3)385 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
386 : _idx(Init(4))
387 #ifdef ASSERT
388 , _parse_idx(_idx)
389 #endif
390 {
391 debug_only( verify_construction() );
392 NOT_PRODUCT(nodes_created++);
393 assert( is_not_dead(n0), "can not use dead node");
394 assert( is_not_dead(n1), "can not use dead node");
395 assert( is_not_dead(n2), "can not use dead node");
396 assert( is_not_dead(n3), "can not use dead node");
397 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
398 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
399 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
400 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
401 }
402
403 //------------------------------Node-------------------------------------------
Node(Node * n0,Node * n1,Node * n2,Node * n3,Node * n4)404 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
405 : _idx(Init(5))
406 #ifdef ASSERT
407 , _parse_idx(_idx)
408 #endif
409 {
410 debug_only( verify_construction() );
411 NOT_PRODUCT(nodes_created++);
412 assert( is_not_dead(n0), "can not use dead node");
413 assert( is_not_dead(n1), "can not use dead node");
414 assert( is_not_dead(n2), "can not use dead node");
415 assert( is_not_dead(n3), "can not use dead node");
416 assert( is_not_dead(n4), "can not use dead node");
417 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
418 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
419 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
420 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
421 _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
422 }
423
424 //------------------------------Node-------------------------------------------
Node(Node * n0,Node * n1,Node * n2,Node * n3,Node * n4,Node * n5)425 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
426 Node *n4, Node *n5)
427 : _idx(Init(6))
428 #ifdef ASSERT
429 , _parse_idx(_idx)
430 #endif
431 {
432 debug_only( verify_construction() );
433 NOT_PRODUCT(nodes_created++);
434 assert( is_not_dead(n0), "can not use dead node");
435 assert( is_not_dead(n1), "can not use dead node");
436 assert( is_not_dead(n2), "can not use dead node");
437 assert( is_not_dead(n3), "can not use dead node");
438 assert( is_not_dead(n4), "can not use dead node");
439 assert( is_not_dead(n5), "can not use dead node");
440 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
441 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
442 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
443 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
444 _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
445 _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
446 }
447
448 //------------------------------Node-------------------------------------------
Node(Node * n0,Node * n1,Node * n2,Node * n3,Node * n4,Node * n5,Node * n6)449 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
450 Node *n4, Node *n5, Node *n6)
451 : _idx(Init(7))
452 #ifdef ASSERT
453 , _parse_idx(_idx)
454 #endif
455 {
456 debug_only( verify_construction() );
457 NOT_PRODUCT(nodes_created++);
458 assert( is_not_dead(n0), "can not use dead node");
459 assert( is_not_dead(n1), "can not use dead node");
460 assert( is_not_dead(n2), "can not use dead node");
461 assert( is_not_dead(n3), "can not use dead node");
462 assert( is_not_dead(n4), "can not use dead node");
463 assert( is_not_dead(n5), "can not use dead node");
464 assert( is_not_dead(n6), "can not use dead node");
465 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
466 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
467 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
468 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
469 _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
470 _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
471 _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
472 }
473
474 #ifdef __clang__
475 #pragma clang diagnostic pop
476 #endif
477
478
479 //------------------------------clone------------------------------------------
480 // Clone a Node.
clone() const481 Node *Node::clone() const {
482 Compile* C = Compile::current();
483 uint s = size_of(); // Size of inherited Node
484 Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
485 Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
486 // Set the new input pointer array
487 n->_in = (Node**)(((char*)n)+s);
488 // Cannot share the old output pointer array, so kill it
489 n->_out = NO_OUT_ARRAY;
490 // And reset the counters to 0
491 n->_outcnt = 0;
492 n->_outmax = 0;
493 // Unlock this guy, since he is not in any hash table.
494 debug_only(n->_hash_lock = 0);
495 // Walk the old node's input list to duplicate its edges
496 uint i;
497 for( i = 0; i < len(); i++ ) {
498 Node *x = in(i);
499 n->_in[i] = x;
500 if (x != NULL) x->add_out(n);
501 }
502 if (is_macro())
503 C->add_macro_node(n);
504 if (is_expensive())
505 C->add_expensive_node(n);
506 if (n->is_reduction()) {
507 // Do not copy reduction information. This must be explicitly set by the calling code.
508 n->remove_flag(Node::Flag_is_reduction);
509 }
510 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
511 bs->register_potential_barrier_node(n);
512 // If the cloned node is a range check dependent CastII, add it to the list.
513 CastIINode* cast = n->isa_CastII();
514 if (cast != NULL && cast->has_range_check()) {
515 C->add_range_check_cast(cast);
516 }
517 if (n->Opcode() == Op_Opaque4) {
518 C->add_opaque4_node(n);
519 }
520
521 n->set_idx(C->next_unique()); // Get new unique index as well
522 debug_only( n->verify_construction() );
523 NOT_PRODUCT(nodes_created++);
524 // Do not patch over the debug_idx of a clone, because it makes it
525 // impossible to break on the clone's moment of creation.
526 //debug_only( n->set_debug_idx( debug_idx() ) );
527
528 C->copy_node_notes_to(n, (Node*) this);
529
530 // MachNode clone
531 uint nopnds;
532 if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
533 MachNode *mach = n->as_Mach();
534 MachNode *mthis = this->as_Mach();
535 // Get address of _opnd_array.
536 // It should be the same offset since it is the clone of this node.
537 MachOper **from = mthis->_opnds;
538 MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
539 pointer_delta((const void*)from,
540 (const void*)(&mthis->_opnds), 1));
541 mach->_opnds = to;
542 for ( uint i = 0; i < nopnds; ++i ) {
543 to[i] = from[i]->clone();
544 }
545 }
546 // cloning CallNode may need to clone JVMState
547 if (n->is_Call()) {
548 n->as_Call()->clone_jvms(C);
549 }
550 if (n->is_SafePoint()) {
551 n->as_SafePoint()->clone_replaced_nodes();
552 }
553 return n; // Return the clone
554 }
555
556 //---------------------------setup_is_top--------------------------------------
557 // Call this when changing the top node, to reassert the invariants
558 // required by Node::is_top. See Compile::set_cached_top_node.
setup_is_top()559 void Node::setup_is_top() {
560 if (this == (Node*)Compile::current()->top()) {
561 // This node has just become top. Kill its out array.
562 _outcnt = _outmax = 0;
563 _out = NULL; // marker value for top
564 assert(is_top(), "must be top");
565 } else {
566 if (_out == NULL) _out = NO_OUT_ARRAY;
567 assert(!is_top(), "must not be top");
568 }
569 }
570
571
572 //------------------------------~Node------------------------------------------
573 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
destruct()574 void Node::destruct() {
575 Compile* compile = Compile::current();
576 // If this is the most recently created node, reclaim its index. Otherwise,
577 // record the node as dead to keep liveness information accurate.
578 if ((uint)_idx+1 == compile->unique()) {
579 compile->set_unique(compile->unique()-1);
580 } else {
581 compile->record_dead_node(_idx);
582 }
583 // Clear debug info:
584 Node_Notes* nn = compile->node_notes_at(_idx);
585 if (nn != NULL) nn->clear();
586 // Walk the input array, freeing the corresponding output edges
587 _cnt = _max; // forget req/prec distinction
588 uint i;
589 for( i = 0; i < _max; i++ ) {
590 set_req(i, NULL);
591 //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
592 }
593 assert(outcnt() == 0, "deleting a node must not leave a dangling use");
594 // See if the input array was allocated just prior to the object
595 int edge_size = _max*sizeof(void*);
596 int out_edge_size = _outmax*sizeof(void*);
597 char *edge_end = ((char*)_in) + edge_size;
598 char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
599 int node_size = size_of();
600
601 // Free the output edge array
602 if (out_edge_size > 0) {
603 compile->node_arena()->Afree(out_array, out_edge_size);
604 }
605
606 // Free the input edge array and the node itself
607 if( edge_end == (char*)this ) {
608 // It was; free the input array and object all in one hit
609 #ifndef ASSERT
610 compile->node_arena()->Afree(_in,edge_size+node_size);
611 #endif
612 } else {
613 // Free just the input array
614 compile->node_arena()->Afree(_in,edge_size);
615
616 // Free just the object
617 #ifndef ASSERT
618 compile->node_arena()->Afree(this,node_size);
619 #endif
620 }
621 if (is_macro()) {
622 compile->remove_macro_node(this);
623 }
624 if (is_expensive()) {
625 compile->remove_expensive_node(this);
626 }
627 CastIINode* cast = isa_CastII();
628 if (cast != NULL && cast->has_range_check()) {
629 compile->remove_range_check_cast(cast);
630 }
631 if (Opcode() == Op_Opaque4) {
632 compile->remove_opaque4_node(this);
633 }
634
635 if (is_SafePoint()) {
636 as_SafePoint()->delete_replaced_nodes();
637 }
638 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
639 bs->unregister_potential_barrier_node(this);
640 #ifdef ASSERT
641 // We will not actually delete the storage, but we'll make the node unusable.
642 *(address*)this = badAddress; // smash the C++ vtbl, probably
643 _in = _out = (Node**) badAddress;
644 _max = _cnt = _outmax = _outcnt = 0;
645 compile->remove_modified_node(this);
646 #endif
647 }
648
649 //------------------------------grow-------------------------------------------
650 // Grow the input array, making space for more edges
grow(uint len)651 void Node::grow( uint len ) {
652 Arena* arena = Compile::current()->node_arena();
653 uint new_max = _max;
654 if( new_max == 0 ) {
655 _max = 4;
656 _in = (Node**)arena->Amalloc(4*sizeof(Node*));
657 Node** to = _in;
658 to[0] = NULL;
659 to[1] = NULL;
660 to[2] = NULL;
661 to[3] = NULL;
662 return;
663 }
664 while( new_max <= len ) new_max <<= 1; // Find next power-of-2
665 // Trimming to limit allows a uint8 to handle up to 255 edges.
666 // Previously I was using only powers-of-2 which peaked at 128 edges.
667 //if( new_max >= limit ) new_max = limit-1;
668 _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
669 Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
670 _max = new_max; // Record new max length
671 // This assertion makes sure that Node::_max is wide enough to
672 // represent the numerical value of new_max.
673 assert(_max == new_max && _max > len, "int width of _max is too small");
674 }
675
676 //-----------------------------out_grow----------------------------------------
677 // Grow the input array, making space for more edges
out_grow(uint len)678 void Node::out_grow( uint len ) {
679 assert(!is_top(), "cannot grow a top node's out array");
680 Arena* arena = Compile::current()->node_arena();
681 uint new_max = _outmax;
682 if( new_max == 0 ) {
683 _outmax = 4;
684 _out = (Node **)arena->Amalloc(4*sizeof(Node*));
685 return;
686 }
687 while( new_max <= len ) new_max <<= 1; // Find next power-of-2
688 // Trimming to limit allows a uint8 to handle up to 255 edges.
689 // Previously I was using only powers-of-2 which peaked at 128 edges.
690 //if( new_max >= limit ) new_max = limit-1;
691 assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
692 _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
693 //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
694 _outmax = new_max; // Record new max length
695 // This assertion makes sure that Node::_max is wide enough to
696 // represent the numerical value of new_max.
697 assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
698 }
699
700 #ifdef ASSERT
701 //------------------------------is_dead----------------------------------------
is_dead() const702 bool Node::is_dead() const {
703 // Mach and pinch point nodes may look like dead.
704 if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
705 return false;
706 for( uint i = 0; i < _max; i++ )
707 if( _in[i] != NULL )
708 return false;
709 dump();
710 return true;
711 }
712
is_reachable_from_root() const713 bool Node::is_reachable_from_root() const {
714 ResourceMark rm;
715 Unique_Node_List wq;
716 wq.push((Node*)this);
717 RootNode* root = Compile::current()->root();
718 for (uint i = 0; i < wq.size(); i++) {
719 Node* m = wq.at(i);
720 if (m == root) {
721 return true;
722 }
723 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
724 Node* u = m->fast_out(j);
725 wq.push(u);
726 }
727 }
728 return false;
729 }
730 #endif
731
732 //------------------------------is_unreachable---------------------------------
is_unreachable(PhaseIterGVN & igvn) const733 bool Node::is_unreachable(PhaseIterGVN &igvn) const {
734 assert(!is_Mach(), "doesn't work with MachNodes");
735 return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != NULL && in(0)->is_top());
736 }
737
738 //------------------------------add_req----------------------------------------
739 // Add a new required input at the end
add_req(Node * n)740 void Node::add_req( Node *n ) {
741 assert( is_not_dead(n), "can not use dead node");
742
743 // Look to see if I can move precedence down one without reallocating
744 if( (_cnt >= _max) || (in(_max-1) != NULL) )
745 grow( _max+1 );
746
747 // Find a precedence edge to move
748 if( in(_cnt) != NULL ) { // Next precedence edge is busy?
749 uint i;
750 for( i=_cnt; i<_max; i++ )
751 if( in(i) == NULL ) // Find the NULL at end of prec edge list
752 break; // There must be one, since we grew the array
753 _in[i] = in(_cnt); // Move prec over, making space for req edge
754 }
755 _in[_cnt++] = n; // Stuff over old prec edge
756 if (n != NULL) n->add_out((Node *)this);
757 }
758
759 //---------------------------add_req_batch-------------------------------------
760 // Add a new required input at the end
add_req_batch(Node * n,uint m)761 void Node::add_req_batch( Node *n, uint m ) {
762 assert( is_not_dead(n), "can not use dead node");
763 // check various edge cases
764 if ((int)m <= 1) {
765 assert((int)m >= 0, "oob");
766 if (m != 0) add_req(n);
767 return;
768 }
769
770 // Look to see if I can move precedence down one without reallocating
771 if( (_cnt+m) > _max || _in[_max-m] )
772 grow( _max+m );
773
774 // Find a precedence edge to move
775 if( _in[_cnt] != NULL ) { // Next precedence edge is busy?
776 uint i;
777 for( i=_cnt; i<_max; i++ )
778 if( _in[i] == NULL ) // Find the NULL at end of prec edge list
779 break; // There must be one, since we grew the array
780 // Slide all the precs over by m positions (assume #prec << m).
781 Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
782 }
783
784 // Stuff over the old prec edges
785 for(uint i=0; i<m; i++ ) {
786 _in[_cnt++] = n;
787 }
788
789 // Insert multiple out edges on the node.
790 if (n != NULL && !n->is_top()) {
791 for(uint i=0; i<m; i++ ) {
792 n->add_out((Node *)this);
793 }
794 }
795 }
796
797 //------------------------------del_req----------------------------------------
798 // Delete the required edge and compact the edge array
del_req(uint idx)799 void Node::del_req( uint idx ) {
800 assert( idx < _cnt, "oob");
801 assert( !VerifyHashTableKeys || _hash_lock == 0,
802 "remove node from hash table before modifying it");
803 // First remove corresponding def-use edge
804 Node *n = in(idx);
805 if (n != NULL) n->del_out((Node *)this);
806 _in[idx] = in(--_cnt); // Compact the array
807 // Avoid spec violation: Gap in prec edges.
808 close_prec_gap_at(_cnt);
809 Compile::current()->record_modified_node(this);
810 }
811
//------------------------------del_req_ordered--------------------------------
// Delete the required edge at 'idx' and compact the edge array while
// preserving the relative order of the remaining required inputs.
void Node::del_req_ordered( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  if (idx < --_cnt) {    // Not last edge ?
    // Slide the tail of the required-input array down one slot.
    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*)));
  }
  // Avoid spec violation: Gap in prec edges.
  close_prec_gap_at(_cnt);
  Compile::current()->record_modified_node(this);
}
828
//------------------------------ins_req----------------------------------------
// Insert a new required input at position 'idx', sliding later required
// inputs up by one.  add_req(NULL) first grows the array (and bumps _cnt)
// so there is guaranteed room for the shift.
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(NULL);                // Make space
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                            // Stuff over old required edge
  if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
}
842
843 //-----------------------------find_edge---------------------------------------
find_edge(Node * n)844 int Node::find_edge(Node* n) {
845 for (uint i = 0; i < len(); i++) {
846 if (_in[i] == n) return i;
847 }
848 return -1;
849 }
850
//----------------------------replace_edge-------------------------------------
// Replace every input edge (required and precedence) that points at 'old'
// with 'neww'.  Returns the number of edges replaced.
int Node::replace_edge(Node* old, Node* neww) {
  if (old == neww) return 0;  // nothing to do
  uint nrep = 0;
  for (uint i = 0; i < len(); i++) {
    if (in(i) == old) {
      if (i < req()) {
        set_req(i, neww);       // required edge
      } else {
        // Precedence edges must remain duplicate-free by spec.
        assert(find_prec_edge(neww) == -1, "spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx);
        set_prec(i, neww);
      }
      nrep++;
    }
  }
  return nrep;
}
868
869 /**
870 * Replace input edges in the range pointing to 'old' node.
871 */
replace_edges_in_range(Node * old,Node * neww,int start,int end)872 int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
873 if (old == neww) return 0; // nothing to do
874 uint nrep = 0;
875 for (int i = start; i < end; i++) {
876 if (in(i) == old) {
877 set_req(i, neww);
878 nrep++;
879 }
880 }
881 return nrep;
882 }
883
//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
int Node::disconnect_inputs(Node *n, Compile* C) {
  int edges_to_n = 0;

  // Clear all required inputs, counting how many pointed at 'n'.
  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 ) continue;
    if( in(i) == n ) ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 ) continue;
      if( in(i) == n ) ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();)   // no reuse benefit expected
  // Only record this node as dead when no edges to 'n' were found;
  // presumably the caller handles the counted-edges case itself — confirm.
  if (edges_to_n == 0) {
    C->record_dead_node(_idx);
  }
  return edges_to_n;
}
914
915 //-----------------------------uncast---------------------------------------
916 // %%% Temporary, until we sort out CheckCastPP vs. CastPP.
917 // Strip away casting. (It is depth-limited.)
uncast() const918 Node* Node::uncast() const {
919 // Should be inline:
920 //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
921 if (is_ConstraintCast())
922 return uncast_helper(this);
923 else
924 return (Node*) this;
925 }
926
// Find the first use (out-edge) of this node whose Opcode matches 'opcode'.
// Returns NULL when no such use exists.
Node* Node::find_out_with(int opcode) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* use = fast_out(i);
    if (use->Opcode() == opcode) {
      return use;
    }
  }
  return NULL;
}
937
938 // Return true if the current node has an out that matches opcode.
has_out_with(int opcode)939 bool Node::has_out_with(int opcode) {
940 return (find_out_with(opcode) != NULL);
941 }
942
943 // Return true if the current node has an out that matches any of the opcodes.
has_out_with(int opcode1,int opcode2,int opcode3,int opcode4)944 bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) {
945 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
946 int opcode = fast_out(i)->Opcode();
947 if (opcode == opcode1 || opcode == opcode2 || opcode == opcode3 || opcode == opcode4) {
948 return true;
949 }
950 }
951 return false;
952 }
953
954
//---------------------------uncast_helper-------------------------------------
// Walk up through a chain of ConstraintCast nodes (each with exactly one
// data input, i.e. req() == 2) and return the first non-cast node found.
// Debug builds cap the walk at K steps to catch cyclic cast chains.
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    // Dump the offending chain before the assert below fires.
    if (depth_count >= K) {
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);   // keep stripping casts
    } else {
      break;
    }
  }
  return (Node*) p;
}
981
//------------------------------add_prec---------------------------------------
// Add a new precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for NULL at end: if the last slot is occupied (or there is no
  // precedence region at all), grow the array first.
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Find the first empty slot past the required inputs, bailing out early
  // if 'n' is already present as a precedence edge.
  uint i = _cnt;
  while( in(i) != NULL ) {
    if (in(i) == n) return; // Avoid spec violation: duplicated prec edge.
    i++;
  }
  _in[i] = n;                                // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this);  // Add mirror edge

#ifdef ASSERT
  // Spec check: everything after the first NULL must also be NULL (no gaps).
  while ((++i)<_max) { assert(_in[i] == NULL, "spec violation: Gap in prec edges (node %d)", _idx); }
#endif
}
1005
//------------------------------rm_prec----------------------------------------
// Remove a precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::rm_prec( uint j ) {
  assert(j < _max, "oob: i=%d, _max=%d", j, _max);
  assert(j >= _cnt, "not a precedence edge");
  if (_in[j] == NULL) return;     // Avoid spec violation: Gap in prec edges.
  _in[j]->del_out((Node *)this);  // Drop the mirror use-edge first.
  close_prec_gap_at(j);           // Re-pack so prec edges stay gap-free.
}
1016
//------------------------------size_of----------------------------------------
// Size in bytes of this Node object (base class only; subclasses report
// their own size by overriding this).
uint Node::size_of() const { return sizeof(*this); }
1019
//------------------------------ideal_reg--------------------------------------
// Base implementation: no ideal register class (returns 0).
uint Node::ideal_reg() const { return 0; }
1022
//------------------------------jvms-------------------------------------------
// Base implementation: plain nodes carry no JVM state.
JVMState* Node::jvms() const { return NULL; }
1025
1026 #ifdef ASSERT
1027 //------------------------------jvms-------------------------------------------
verify_jvms(const JVMState * using_jvms) const1028 bool Node::verify_jvms(const JVMState* using_jvms) const {
1029 for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
1030 if (jvms == using_jvms) return true;
1031 }
1032 return false;
1033 }
1034
//------------------------------init_NodeProperty------------------------------
// Static sanity checks: the node class and flag enumerations must each fit
// in a jushort.
void Node::init_NodeProperty() {
  assert(_max_classes <= max_jushort, "too many NodeProperty classes");
  assert(_max_flags <= max_jushort, "too many NodeProperty flags");
}
1040 #endif
1041
//------------------------------format-----------------------------------------
// Print as assembly
// Base implementation prints nothing.
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes starting at parameter 'ptr'.
// Base implementation emits nothing.
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes
// Base implementation: zero bytes (no machine encoding).
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }
1051
//------------------------------CFG Construction-------------------------------
// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
// Goto and Return.
// Base implementation: this node does not end a basic block.
const Node *Node::is_block_proj() const { return 0; }
1056
// Minimum guaranteed type
// Base implementation gives the weakest possible answer.
const Type *Node::bottom_type() const { return Type::BOTTOM; }
1059
1060
//------------------------------raise_bottom_type------------------------------
// Get the worst-case Type output for this Node.
// Only TypeNodes and LoadNodes carry a mutable type; all other node kinds
// silently ignore this call.
void Node::raise_bottom_type(const Type* new_type) {
  if (is_Type()) {
    TypeNode *n = this->as_Type();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  } else if (is_Load()) {
    LoadNode *n = this->as_Load();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  }
}
1078
//------------------------------Identity---------------------------------------
// Return a node that the given node is equivalent to.
// Base implementation: a node is only equivalent to itself.
Node* Node::Identity(PhaseGVN* phase) {
  return this;                  // Default to no identities
}
1084
//------------------------------Value------------------------------------------
// Compute a new Type for a node using the Type of the inputs.
// Base implementation ignores the inputs entirely.
const Type* Node::Value(PhaseGVN* phase) const {
  return bottom_type();         // Default to worst-case Type
}
1090
1091 //------------------------------Ideal------------------------------------------
1092 //
1093 // 'Idealize' the graph rooted at this Node.
1094 //
1095 // In order to be efficient and flexible there are some subtle invariants
1096 // these Ideal calls need to hold. Running with '+VerifyIterativeGVN' checks
1097 // these invariants, although its too slow to have on by default. If you are
1098 // hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
1099 //
1100 // The Ideal call almost arbitrarily reshape the graph rooted at the 'this'
1101 // pointer. If ANY change is made, it must return the root of the reshaped
1102 // graph - even if the root is the same Node. Example: swapping the inputs
1103 // to an AddINode gives the same answer and same root, but you still have to
1104 // return the 'this' pointer instead of NULL.
1105 //
1106 // You cannot return an OLD Node, except for the 'this' pointer. Use the
1107 // Identity call to return an old Node; basically if Identity can find
1108 // another Node have the Ideal call make no change and return NULL.
1109 // Example: AddINode::Ideal must check for add of zero; in this case it
1110 // returns NULL instead of doing any graph reshaping.
1111 //
1112 // You cannot modify any old Nodes except for the 'this' pointer. Due to
1113 // sharing there may be other users of the old Nodes relying on their current
1114 // semantics. Modifying them will break the other users.
1115 // Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for
1116 // "X+3" unchanged in case it is shared.
1117 //
1118 // If you modify the 'this' pointer's inputs, you should use
1119 // 'set_req'. If you are making a new Node (either as the new root or
1120 // some new internal piece) you may use 'init_req' to set the initial
1121 // value. You can make a new Node with either 'new' or 'clone'. In
1122 // either case, def-use info is correctly maintained.
1123 //
1124 // Example: reshape "(X+3)+4" into "X+7":
1125 // set_req(1, in(1)->in(1));
1126 // set_req(2, phase->intcon(7));
1127 // return this;
1128 // Example: reshape "X*4" into "X<<2"
1129 // return new LShiftINode(in(1), phase->intcon(2));
1130 //
1131 // You must call 'phase->transform(X)' on any new Nodes X you make, except
1132 // for the returned root node. Example: reshape "X*31" with "(X<<5)-X".
1133 // Node *shift=phase->transform(new LShiftINode(in(1),phase->intcon(5)));
1134 // return new AddINode(shift, in(1));
1135 //
1136 // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
1137 // These forms are faster than 'phase->transform(new ConNode())' and Do
1138 // The Right Thing with def-use info.
1139 //
1140 // You cannot bury the 'this' Node inside of a graph reshape. If the reshaped
1141 // graph uses the 'this' Node it must be the root. If you want a Node with
1142 // the same Opcode as the 'this' pointer use 'clone'.
1143 //
Ideal(PhaseGVN * phase,bool can_reshape)1144 Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
1145 return NULL; // Default to being Ideal already
1146 }
1147
1148 // Some nodes have specific Ideal subgraph transformations only if they are
1149 // unique users of specific nodes. Such nodes should be put on IGVN worklist
1150 // for the transformations to happen.
has_special_unique_user() const1151 bool Node::has_special_unique_user() const {
1152 assert(outcnt() == 1, "match only for unique out");
1153 Node* n = unique_out();
1154 int op = Opcode();
1155 if (this->is_Store()) {
1156 // Condition for back-to-back stores folding.
1157 return n->Opcode() == op && n->in(MemNode::Memory) == this;
1158 } else if (this->is_Load() || this->is_DecodeN() || this->is_Phi()) {
1159 // Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
1160 return n->Opcode() == Op_MemBarAcquire;
1161 } else if (op == Op_AddL) {
1162 // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
1163 return n->Opcode() == Op_ConvL2I && n->in(1) == this;
1164 } else if (op == Op_SubI || op == Op_SubL) {
1165 // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
1166 return n->Opcode() == op && n->in(2) == this;
1167 } else if (is_If() && (n->is_IfFalse() || n->is_IfTrue())) {
1168 // See IfProjNode::Identity()
1169 return true;
1170 }
1171 return false;
1172 };
1173
//--------------------------find_exact_control---------------------------------
// Skip Proj and CatchProj nodes chains. Check for Null and Top.
Node* Node::find_exact_control(Node* ctrl) {
  // A copy Region forwards a single control input; use that input directly.
  if (ctrl == NULL && this->is_Region())
    ctrl = this->as_Region()->is_copy();

  if (ctrl != NULL && ctrl->is_CatchProj()) {
    // Step over the CatchProj (fall-through path only) and then its parent.
    if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
      ctrl = ctrl->in(0);
    if (ctrl != NULL && !ctrl->is_top())
      ctrl = ctrl->in(0);
  }

  if (ctrl != NULL && ctrl->is_Proj())
    ctrl = ctrl->in(0);

  return ctrl;
}
1192
//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or equal to 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths so, so this is a simple search for one example,
// not an exhaustive search for a counterexample.
// 'nlist' is scratch storage; it records visited 2-input Regions as tagged
// pointers (low bit set == visited via both inputs already).
bool Node::dominates(Node* sub, Node_List &nlist) {
  assert(this->is_CFG(), "expecting control");
  assert(sub != NULL && sub->is_CFG(), "expecting control");

  // detect dead cycle without regions
  int iterations_without_region_limit = DominatorSearchLimit;

  Node* orig_sub = sub;
  Node* dom      = this;
  bool  met_dom  = false;
  nlist.clear();

  // Walk 'sub' backward up the chain to 'dom', watching for regions.
  // After seeing 'dom', continue up to Root or Start.
  // If we hit a region (backward split point), it may be a loop head.
  // Keep going through one of the region's inputs.  If we reach the
  // same region again, go through a different input.  Eventually we
  // will either exit through the loop head, or give up.
  // (If we get confused, break out and return a conservative 'false'.)
  while (sub != NULL) {
    if (sub->is_top())  break; // Conservative answer for dead code.
    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes except loops were visited before and the EntryControl
        // path was taken for loops: it did not walk in a cycle.
        return true;
      } else if (met_dom) {
        break;          // already met before: walk in a cycle
      } else {
        // Region nodes were visited. Continue walk up to Start or Root
        // to make sure that it did not walk in a cycle.
        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
      // Success if we met 'dom' along a path to Start or Root.
      // We assume there are no alternative paths that avoid 'dom'.
      // (This assumption is up to the caller to ensure!)
      return met_dom;
    }
    Node* up = sub->in(0);
    // Normalize simple pass-through regions and projections:
    up = sub->find_exact_control(up);
    // If sub == up, we found a self-loop.  Try to push past it.
    if (sub == up && sub->is_Loop()) {
      // Take loop entry path on the way up to 'dom'.
      up = sub->in(1); // in(LoopNode::EntryControl);
    } else if (sub == up && sub->is_Region() && sub->req() != 3) {
      // Always take in(1) path on the way up to 'dom' for clone regions
      // (with only one input) or regions which merge > 2 paths
      // (usually used to merge fast/slow paths).
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
      // Try both paths for Regions with 2 input paths (it may be a loop head).
      // It could give conservative 'false' answer without information
      // which region's input is the entry path.
      iterations_without_region_limit = DominatorSearchLimit; // Reset

      bool region_was_visited_before = false;
      // Was this Region node visited before?
      // If so, we have reached it because we accidentally took a
      // loop-back edge from 'sub' back into the body of the loop,
      // and worked our way up again to the loop header 'sub'.
      // So, take the first unexplored path on the way up to 'dom'.
      for (int j = nlist.size() - 1; j >= 0; j--) {
        intptr_t ni = (intptr_t)nlist.at(j);
        Node* visited = (Node*)(ni & ~1);   // Strip the low 'visited twice' tag bit.
        bool visited_twice_already = ((ni & 1) != 0);
        if (visited == sub) {
          if (visited_twice_already) {
            // Visited 2 paths, but still stuck in loop body.  Give up.
            return false;
          }
          // The Region node was visited before only once.
          // (We will repush with the low bit set, below.)
          nlist.remove(j);
          // We will find a new edge and re-insert.
          region_was_visited_before = true;
          break;
        }
      }

      // Find an incoming edge which has not been seen yet; walk through it.
      assert(up == sub, "");
      uint skip = region_was_visited_before ? 1 : 0;
      for (uint i = 1; i < sub->req(); i++) {
        Node* in = sub->in(i);
        if (in != NULL && !in->is_top() && in != sub) {
          if (skip == 0) {
            up = in;
            break;
          }
          --skip; // skip this nontrivial input
        }
      }

      // Set 0 bit to indicate that both paths were taken.
      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }

    if (up == sub) {
      break;    // some kind of tight cycle
    }
    if (up == orig_sub && met_dom) {
      // returned back after visiting 'dom'
      break;    // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
      break;    // dead cycle
    }
    sub = up;
  }

  // Did not meet Root or Start node in pred. chain.
  // Conservative answer for dead code.
  return false;
}
1317
//------------------------------kill_dead_code---------------------------------
// This control node is dead.  Follow the subgraph below it making everything
// using it dead as well.  This will happen normally via the usual IterGVN
// worklist but this call is more efficient.  Do not update use-def info
// inside the dead region, just at the borders.
static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  // Con's are a popular node to re-hit in the hash table again.
  if( dead->is_Con() ) return;

  // Can't put ResourceMark here since igvn->_worklist uses the same arena
  // for verify pass with +VerifyOpto and we add/remove elements in it here.
  Node_List  nstack(Thread::current()->resource_area());

  Node *top = igvn->C->top();
  nstack.push(dead);
  bool has_irreducible_loop = igvn->C->has_irreducible_loop();

  while (nstack.size() > 0) {
    dead = nstack.pop();
    if (dead->Opcode() == Op_SafePoint) {
      dead->as_SafePoint()->disconnect_from_root(igvn);
    }
    if (dead->outcnt() > 0) {
      // Keep dead node on stack until all uses are processed.
      nstack.push(dead);
      // For all Users of the Dead...    ;-)
      for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
        Node* use = dead->last_out(k);
        igvn->hash_delete(use);       // Yank from hash table prior to mod
        if (use->in(0) == dead) {     // Found another dead node
          assert (!use->is_Con(), "Control for Con node should be Root node.");
          use->set_req(0, top);       // Cut dead edge to prevent processing
          nstack.push(use);           // the dead node again.
        } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
                   use->is_Loop() && !use->is_Root() &&       // Don't kill Root (RootNode extends LoopNode)
                   use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
          use->set_req(LoopNode::EntryControl, top);          // Cut dead edge to prevent processing
          use->set_req(0, top);       // Cut self edge
          nstack.push(use);
        } else {                      // Else found a not-dead user
          // Dead if all inputs are top or null
          bool dead_use = !use->is_Root(); // Keep empty graph alive
          for (uint j = 1; j < use->req(); j++) {
            Node* in = use->in(j);
            if (in == dead) {         // Turn all dead inputs into TOP
              use->set_req(j, top);
            } else if (in != NULL && !in->is_top()) {
              dead_use = false;
            }
          }
          if (dead_use) {
            if (use->is_Region()) {
              use->set_req(0, top);   // Cut self edge
            }
            nstack.push(use);
          } else {
            igvn->_worklist.push(use);
          }
        }
        // Refresh the iterator, since any number of kills might have happened.
        k = dead->last_outs(kmin);
      }
    } else { // (dead->outcnt() == 0)
      // Done with outputs: fully retire the node.
      igvn->hash_delete(dead);
      igvn->_worklist.remove(dead);
      igvn->C->remove_modified_node(dead);
      igvn->set_type(dead, Type::TOP);
      // Unregister the dead node from compile-phase side tables that may
      // still reference it.
      if (dead->is_macro()) {
        igvn->C->remove_macro_node(dead);
      }
      if (dead->is_expensive()) {
        igvn->C->remove_expensive_node(dead);
      }
      CastIINode* cast = dead->isa_CastII();
      if (cast != NULL && cast->has_range_check()) {
        igvn->C->remove_range_check_cast(cast);
      }
      if (dead->Opcode() == Op_Opaque4) {
        igvn->C->remove_opaque4_node(dead);
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->unregister_potential_barrier_node(dead);
      igvn->C->record_dead_node(dead->_idx);
      // Kill all inputs to the dead guy
      for (uint i=0; i < dead->req(); i++) {
        Node *n = dead->in(i);      // Get input to dead guy
        if (n != NULL && !n->is_top()) { // Input is valid?
          dead->set_req(i, top);    // Smash input away
          if (n->outcnt() == 0) {   // Input also goes dead?
            if (!n->is_Con())
              nstack.push(n);       // Clear it out as well
          } else if (n->outcnt() == 1 &&
                     n->has_special_unique_user()) {
            igvn->add_users_to_worklist( n );
          } else if (n->outcnt() <= 2 && n->is_Store()) {
            // Push store's uses on worklist to enable folding optimization for
            // store/store and store/load to the same address.
            // The restriction (outcnt() <= 2) is the same as in set_req_X()
            // and remove_globally_dead_node().
            igvn->add_users_to_worklist( n );
          } else {
            BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn->_worklist, n);
          }
        }
      }
    } // (dead->outcnt() == 0)
  }   // while (nstack.size() > 0) for outputs
  return;
}
1428
//------------------------------remove_dead_region-----------------------------
// Clean up this node's control input (in(0)):
//  - if control is top and reshaping is allowed, kill this node's whole
//    dead subgraph (returns false: node is dead);
//  - if control is a copy Region, route control around it (returns true:
//    an edge changed, so callers should re-run Ideal);
//  - otherwise returns false.
bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
  Node *n = in(0);
  if( !n ) return false;
  // Lost control into this guy?  I.e., it became unreachable?
  // Aggressively kill all unreachable code.
  if (can_reshape && n->is_top()) {
    kill_dead_code(this, phase->is_IterGVN());
    return false; // Node is dead.
  }

  if( n->is_Region() && n->as_Region()->is_copy() ) {
    Node *m = n->nonnull_req();   // the single live path through the Region
    set_req(0, m);
    return true;
  }
  return false;
}
1447
//------------------------------hash-------------------------------------------
// Hash function over Nodes.
// Mixes the addresses of all required inputs with the input count and opcode.
uint Node::hash() const {
  uint sum = 0;
  for( uint i=0; i<_cnt; i++ )        // Add in all inputs
    sum = (sum<<1)-(uintptr_t)in(i);  // Ignore embedded NULLs
  return (sum>>2) + _cnt + Opcode();
}
1456
//------------------------------cmp--------------------------------------------
// Compare special parts of simple Nodes.
// Base nodes carry no extra state beyond their inputs, so they always
// compare equal (non-zero result).
uint Node::cmp( const Node &n ) const {
  return 1;                     // Must be same
}
1462
1463 //------------------------------rematerialize-----------------------------------
1464 // Should we clone rather than spill this instruction?
rematerialize() const1465 bool Node::rematerialize() const {
1466 if ( is_Mach() )
1467 return this->as_Mach()->rematerialize();
1468 else
1469 return (_flags & Flag_rematerialize) != 0;
1470 }
1471
1472 //------------------------------needs_anti_dependence_check---------------------
1473 // Nodes which use memory without consuming it, hence need antidependences.
needs_anti_dependence_check() const1474 bool Node::needs_anti_dependence_check() const {
1475 if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
1476 return false;
1477 else
1478 return in(1)->bottom_type()->has_memory();
1479 }
1480
1481
1482 // Get an integer constant from a ConNode (or CastIINode).
1483 // Return a default value if there is no apparent constant here.
find_int_type() const1484 const TypeInt* Node::find_int_type() const {
1485 if (this->is_Type()) {
1486 return this->as_Type()->type()->isa_int();
1487 } else if (this->is_Con()) {
1488 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1489 return this->bottom_type()->isa_int();
1490 }
1491 return NULL;
1492 }
1493
// Get a pointer constant from a ConstNode.
// Returns the constant if it is a pointer ConstNode
// (asserts the node is a ConP).
intptr_t Node::get_ptr() const {
  assert( Opcode() == Op_ConP, "" );
  return ((ConPNode*)this)->type()->is_ptr()->get_con();
}
1500
// Get a narrow oop constant from a ConNNode.
// (Asserts the node is a ConN.)
intptr_t Node::get_narrowcon() const {
  assert( Opcode() == Op_ConN, "" );
  return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
}
1506
// Get a long constant from a ConNode.
// Return a default value if there is no apparent constant here.
// (Mirrors find_int_type above, for long types.)
const TypeLong* Node::find_long_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_long();
  } else if (this->is_Con()) {
    // A constant that is not a TypeNode must be a machine node.
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_long();
  }
  return NULL;
}
1518
1519
/**
 * Return a ptr type for nodes which should have it.
 * In debug builds, dumps the offending node before asserting if the
 * bottom type cannot be turned into a pointer type.
 */
const TypePtr* Node::get_ptr_type() const {
  const TypePtr* tp = this->bottom_type()->make_ptr();
#ifdef ASSERT
  if (tp == NULL) {
    this->dump(1);  // Show the node (and one level of inputs) before asserting.
    assert((tp != NULL), "unexpected node type");
  }
#endif
  return tp;
}
1533
// Get a double constant from a ConstNode.
// Returns the constant if it is a double ConstNode
// (asserts the node is a ConD).
jdouble Node::getd() const {
  assert( Opcode() == Op_ConD, "" );
  return ((ConDNode*)this)->type()->is_double_constant()->getd();
}
1540
// Get a float constant from a ConstNode.
// Returns the constant if it is a float ConstNode
// (asserts the node is a ConF).
jfloat Node::getf() const {
  assert( Opcode() == Op_ConF, "" );
  return ((ConFNode*)this)->type()->is_float_constant()->getf();
}
1547
1548 #ifndef PRODUCT
1549
//------------------------------find------------------------------------------
// Find a neighbor of this Node with the given _idx
// If idx is negative, find its absolute value, following both _in and _out.
// 'result' accumulates the match; duplicates with the same idx are reported
// to tty.  The two VectorSets track visited nodes per arena.
static void find_recur(Compile* C,  Node* &result, Node *n, int idx, bool only_ctrl,
                       VectorSet* old_space, VectorSet* new_space ) {
  int node_idx = (idx >= 0) ? idx : -idx;
  if (NotANode(n))  return;  // Gracefully handle NULL, -1, 0xabababab, etc.
  // Contained in new_space or old_space? Check old_arena first since it's mostly empty.
  VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  if( v->test(n->_idx) ) return;   // already visited
  if( (int)n->_idx == node_idx
      debug_only(|| n->debug_idx() == node_idx) ) {
    if (result != NULL)
      tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
                 (uintptr_t)result, (uintptr_t)n, node_idx);
    result = n;
  }
  v->set(n->_idx);
  for( uint i=0; i<n->len(); i++ ) {
    // In control-only mode, descend only through control edges
    // (Regions and Root fan out through all inputs).
    if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
    find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  }
  // Search along forward edges also:
  if (idx < 0 && !only_ctrl) {
    for( uint j=0; j<n->outcnt(); j++ ) {
      find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
    }
  }
#ifdef ASSERT
  // Search along debug_orig edges last, checking for cycles
  Node* orig = n->debug_orig();
  if (orig != NULL) {
    do {
      if (NotANode(orig))  break;
      find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
      orig = orig->debug_orig();
    } while (orig != NULL && orig != n->debug_orig());
  }
#endif //ASSERT
}
1590
// call this from debugger:
// Convenience wrapper around Node::find for interactive use.
Node* find_node(Node* n, int idx) {
  return n->find(idx);
}
1595
//------------------------------find-------------------------------------------
// Debug aid: find a node with the given _idx anywhere in this node's
// neighborhood (see find_recur).  Returns NULL if not found.
Node* Node::find(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
  return result;
}
1604
//------------------------------find_ctrl--------------------------------------
// Find an ancestor to this node in the control history with given _idx
// (only_ctrl=true restricts find_recur's walk to control edges).
Node* Node::find_ctrl(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
  return result;
}
1614 #endif
1615
1616
1617
1618 #ifndef PRODUCT
1619
// -----------------------------Name-------------------------------------------
// Human-readable class name, looked up by Opcode() in the generated table.
extern const char *NodeClassNames[];
const char *Node::Name() const { return NodeClassNames[Opcode()]; }
1623
is_disconnected(const Node * n)1624 static bool is_disconnected(const Node* n) {
1625 for (uint i = 0; i < n->req(); i++) {
1626 if (n->in(i) != NULL) return false;
1627 }
1628 return true;
1629 }
1630
1631 #ifdef ASSERT
// Print the debug_orig chain of 'orig' as " !orig=12,o34,[56],...".
// Disconnected nodes print in brackets and old-arena nodes get an 'o' prefix.
// Uses the tortoise-and-hare scheme ('fast' advances two links per step of
// 'orig') so a cyclic debug_orig chain prints "..." instead of looping forever.
static void dump_orig(Node* orig, outputStream *st) {
  Compile* C = Compile::current();
  // Treat sentinel/garbage pointers and foreign-arena nodes as absent.
  if (NotANode(orig)) orig = NULL;
  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
  if (orig == NULL) return;
  st->print(" !orig=");
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (NotANode(fast)) fast = NULL;
  while (orig != NULL) {
    bool discon = is_disconnected(orig);  // if discon, print [123] else 123
    if (discon) st->print("[");
    // 'o' marks a node that lives outside the current (new) node arena.
    if (!Compile::current()->node_arena()->contains(orig))
      st->print("o");
    st->print("%d", orig->_idx);
    if (discon) st->print("]");
    orig = orig->debug_orig();
    // Sanitize the next link the same way as the initial one.
    if (NotANode(orig)) orig = NULL;
    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
    if (orig != NULL) st->print(",");
    if (fast != NULL) {
      // Step fast twice for each single step of orig:
      fast = fast->debug_orig();
      if (NotANode(fast)) fast = NULL;
      if (fast != NULL && fast != orig) {
        fast = fast->debug_orig();
        if (NotANode(fast)) fast = NULL;
      }
      if (fast == orig) {
        // Hare caught the tortoise: the chain is cyclic.
        st->print("...");
        break;
      }
    }
  }
}
1666
set_debug_orig(Node * orig)1667 void Node::set_debug_orig(Node* orig) {
1668 _debug_orig = orig;
1669 if (BreakAtNode == 0) return;
1670 if (NotANode(orig)) orig = NULL;
1671 int trip = 10;
1672 while (orig != NULL) {
1673 if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
1674 tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
1675 this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
1676 BREAKPOINT;
1677 }
1678 orig = orig->debug_orig();
1679 if (NotANode(orig)) orig = NULL;
1680 if (trip-- <= 0) break;
1681 }
1682 }
1683 #endif //ASSERT
1684
1685 //------------------------------dump------------------------------------------
1686 // Dump a Node
dump(const char * suffix,bool mark,outputStream * st) const1687 void Node::dump(const char* suffix, bool mark, outputStream *st) const {
1688 Compile* C = Compile::current();
1689 bool is_new = C->node_arena()->contains(this);
1690 C->_in_dump_cnt++;
1691 st->print("%c%d%s\t%s\t=== ", is_new ? ' ' : 'o', _idx, mark ? " >" : "", Name());
1692
1693 // Dump the required and precedence inputs
1694 dump_req(st);
1695 dump_prec(st);
1696 // Dump the outputs
1697 dump_out(st);
1698
1699 if (is_disconnected(this)) {
1700 #ifdef ASSERT
1701 st->print(" [%d]",debug_idx());
1702 dump_orig(debug_orig(), st);
1703 #endif
1704 st->cr();
1705 C->_in_dump_cnt--;
1706 return; // don't process dead nodes
1707 }
1708
1709 if (C->clone_map().value(_idx) != 0) {
1710 C->clone_map().dump(_idx);
1711 }
1712 // Dump node-specific info
1713 dump_spec(st);
1714 #ifdef ASSERT
1715 // Dump the non-reset _debug_idx
1716 if (Verbose && WizardMode) {
1717 st->print(" [%d]",debug_idx());
1718 }
1719 #endif
1720
1721 const Type *t = bottom_type();
1722
1723 if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
1724 const TypeInstPtr *toop = t->isa_instptr();
1725 const TypeKlassPtr *tkls = t->isa_klassptr();
1726 ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
1727 if (klass && klass->is_loaded() && klass->is_interface()) {
1728 st->print(" Interface:");
1729 } else if (toop) {
1730 st->print(" Oop:");
1731 } else if (tkls) {
1732 st->print(" Klass:");
1733 }
1734 t->dump_on(st);
1735 } else if (t == Type::MEMORY) {
1736 st->print(" Memory:");
1737 MemNode::dump_adr_type(this, adr_type(), st);
1738 } else if (Verbose || WizardMode) {
1739 st->print(" Type:");
1740 if (t) {
1741 t->dump_on(st);
1742 } else {
1743 st->print("no type");
1744 }
1745 } else if (t->isa_vect() && this->is_MachSpillCopy()) {
1746 // Dump MachSpillcopy vector type.
1747 t->dump_on(st);
1748 }
1749 if (is_new) {
1750 debug_only(dump_orig(debug_orig(), st));
1751 Node_Notes* nn = C->node_notes_at(_idx);
1752 if (nn != NULL && !nn->is_clear()) {
1753 if (nn->jvms() != NULL) {
1754 st->print(" !jvms:");
1755 nn->jvms()->dump_spec(st);
1756 }
1757 }
1758 }
1759 if (suffix) st->print("%s", suffix);
1760 C->_in_dump_cnt--;
1761 }
1762
1763 //------------------------------dump_req--------------------------------------
dump_req(outputStream * st) const1764 void Node::dump_req(outputStream *st) const {
1765 // Dump the required input edges
1766 for (uint i = 0; i < req(); i++) { // For all required inputs
1767 Node* d = in(i);
1768 if (d == NULL) {
1769 st->print("_ ");
1770 } else if (NotANode(d)) {
1771 st->print("NotANode "); // uninitialized, sentinel, garbage, etc.
1772 } else {
1773 st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
1774 }
1775 }
1776 }
1777
1778
1779 //------------------------------dump_prec-------------------------------------
dump_prec(outputStream * st) const1780 void Node::dump_prec(outputStream *st) const {
1781 // Dump the precedence edges
1782 int any_prec = 0;
1783 for (uint i = req(); i < len(); i++) { // For all precedence inputs
1784 Node* p = in(i);
1785 if (p != NULL) {
1786 if (!any_prec++) st->print(" |");
1787 if (NotANode(p)) { st->print("NotANode "); continue; }
1788 st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
1789 }
1790 }
1791 }
1792
1793 //------------------------------dump_out--------------------------------------
dump_out(outputStream * st) const1794 void Node::dump_out(outputStream *st) const {
1795 // Delimit the output edges
1796 st->print(" [[");
1797 // Dump the output edges
1798 for (uint i = 0; i < _outcnt; i++) { // For all outputs
1799 Node* u = _out[i];
1800 if (u == NULL) {
1801 st->print("_ ");
1802 } else if (NotANode(u)) {
1803 st->print("NotANode ");
1804 } else {
1805 st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
1806 }
1807 }
1808 st->print("]] ");
1809 }
1810
1811 //----------------------------collect_nodes_i----------------------------------
1812 // Collects nodes from an Ideal graph, starting from a given start node and
1813 // moving in a given direction until a certain depth (distance from the start
1814 // node) is reached. Duplicates are ignored.
1815 // Arguments:
1816 // nstack: the nodes are collected into this array.
1817 // start: the node at which to start collecting.
1818 // direction: if this is a positive number, collect input nodes; if it is
1819 // a negative number, collect output nodes.
1820 // depth: collect nodes up to this distance from the start node.
1821 // include_start: whether to include the start node in the result collection.
1822 // only_ctrl: whether to regard control edges only during traversal.
1823 // only_data: whether to regard data edges only during traversal.
collect_nodes_i(GrowableArray<Node * > * nstack,const Node * start,int direction,uint depth,bool include_start,bool only_ctrl,bool only_data)1824 static void collect_nodes_i(GrowableArray<Node*> *nstack, const Node* start, int direction, uint depth, bool include_start, bool only_ctrl, bool only_data) {
1825 Node* s = (Node*) start; // remove const
1826 nstack->append(s);
1827 int begin = 0;
1828 int end = 0;
1829 for(uint i = 0; i < depth; i++) {
1830 end = nstack->length();
1831 for(int j = begin; j < end; j++) {
1832 Node* tp = nstack->at(j);
1833 uint limit = direction > 0 ? tp->len() : tp->outcnt();
1834 for(uint k = 0; k < limit; k++) {
1835 Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);
1836
1837 if (NotANode(n)) continue;
1838 // do not recurse through top or the root (would reach unrelated stuff)
1839 if (n->is_Root() || n->is_top()) continue;
1840 if (only_ctrl && !n->is_CFG()) continue;
1841 if (only_data && n->is_CFG()) continue;
1842
1843 bool on_stack = nstack->contains(n);
1844 if (!on_stack) {
1845 nstack->append(n);
1846 }
1847 }
1848 }
1849 begin = end;
1850 }
1851 if (!include_start) {
1852 nstack->remove(s);
1853 }
1854 }
1855
1856 //------------------------------dump_nodes-------------------------------------
dump_nodes(const Node * start,int d,bool only_ctrl)1857 static void dump_nodes(const Node* start, int d, bool only_ctrl) {
1858 if (NotANode(start)) return;
1859
1860 GrowableArray <Node *> nstack(Compile::current()->live_nodes());
1861 collect_nodes_i(&nstack, start, d, (uint) ABS(d), true, only_ctrl, false);
1862
1863 int end = nstack.length();
1864 if (d > 0) {
1865 for(int j = end-1; j >= 0; j--) {
1866 nstack.at(j)->dump();
1867 }
1868 } else {
1869 for(int j = 0; j < end; j++) {
1870 nstack.at(j)->dump();
1871 }
1872 }
1873 }
1874
//------------------------------dump-------------------------------------------
// Dump this node and its neighborhood: inputs if d > 0, outputs if d < 0,
// up to |d| levels away, following both control and data edges.
void Node::dump(int d) const {
  dump_nodes(this, d, false);
}
1879
//------------------------------dump_ctrl--------------------------------------
// Dump a Node's control history to depth
// (only CFG nodes are collected; see collect_nodes_i with only_ctrl == true).
void Node::dump_ctrl(int d) const {
  dump_nodes(this, d, true);
}
1885
//-----------------------------dump_compact------------------------------------
// Compact dump terminated by a newline; delegates to the two-argument form.
void Node::dump_comp() const {
  this->dump_comp("\n");
}
1890
//-----------------------------dump_compact------------------------------------
// Dump a Node in compact representation, i.e., just print its name and index.
// Nodes can specify additional specifics to print in compact representation by
// implementing dump_compact_spec.
void Node::dump_comp(const char* suffix, outputStream *st) const {
  Compile* C = Compile::current();
  C->_in_dump_cnt++;   // bracket the dump for Compile's bookkeeping counter
  st->print("%s(%d)", Name(), _idx);
  this->dump_compact_spec(st);
  if (suffix) {
    st->print("%s", suffix);
  }
  C->_in_dump_cnt--;
}
1905
1906 //----------------------------dump_related-------------------------------------
1907 // Dump a Node's related nodes - the notion of "related" depends on the Node at
1908 // hand and is determined by the implementation of the virtual method rel.
dump_related() const1909 void Node::dump_related() const {
1910 Compile* C = Compile::current();
1911 GrowableArray <Node *> in_rel(C->unique());
1912 GrowableArray <Node *> out_rel(C->unique());
1913 this->related(&in_rel, &out_rel, false);
1914 for (int i = in_rel.length() - 1; i >= 0; i--) {
1915 in_rel.at(i)->dump();
1916 }
1917 this->dump("\n", true);
1918 for (int i = 0; i < out_rel.length(); i++) {
1919 out_rel.at(i)->dump();
1920 }
1921 }
1922
1923 //----------------------------dump_related-------------------------------------
1924 // Dump a Node's related nodes up to a given depth (distance from the start
1925 // node).
1926 // Arguments:
1927 // d_in: depth for input nodes.
1928 // d_out: depth for output nodes (note: this also is a positive number).
dump_related(uint d_in,uint d_out) const1929 void Node::dump_related(uint d_in, uint d_out) const {
1930 Compile* C = Compile::current();
1931 GrowableArray <Node *> in_rel(C->unique());
1932 GrowableArray <Node *> out_rel(C->unique());
1933
1934 // call collect_nodes_i directly
1935 collect_nodes_i(&in_rel, this, 1, d_in, false, false, false);
1936 collect_nodes_i(&out_rel, this, -1, d_out, false, false, false);
1937
1938 for (int i = in_rel.length() - 1; i >= 0; i--) {
1939 in_rel.at(i)->dump();
1940 }
1941 this->dump("\n", true);
1942 for (int i = 0; i < out_rel.length(); i++) {
1943 out_rel.at(i)->dump();
1944 }
1945 }
1946
1947 //------------------------dump_related_compact---------------------------------
1948 // Dump a Node's related nodes in compact representation. The notion of
1949 // "related" depends on the Node at hand and is determined by the implementation
1950 // of the virtual method rel.
dump_related_compact() const1951 void Node::dump_related_compact() const {
1952 Compile* C = Compile::current();
1953 GrowableArray <Node *> in_rel(C->unique());
1954 GrowableArray <Node *> out_rel(C->unique());
1955 this->related(&in_rel, &out_rel, true);
1956 int n_in = in_rel.length();
1957 int n_out = out_rel.length();
1958
1959 this->dump_comp(n_in == 0 ? "\n" : " ");
1960 for (int i = 0; i < n_in; i++) {
1961 in_rel.at(i)->dump_comp(i == n_in - 1 ? "\n" : " ");
1962 }
1963 for (int i = 0; i < n_out; i++) {
1964 out_rel.at(i)->dump_comp(i == n_out - 1 ? "\n" : " ");
1965 }
1966 }
1967
1968 //------------------------------related----------------------------------------
1969 // Collect a Node's related nodes. The default behaviour just collects the
1970 // inputs and outputs at depth 1, including both control and data flow edges,
1971 // regardless of whether the presentation is compact or not. For data nodes,
1972 // the default is to collect all data inputs (till level 1 if compact), and
1973 // outputs till level 1.
related(GrowableArray<Node * > * in_rel,GrowableArray<Node * > * out_rel,bool compact) const1974 void Node::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
1975 if (this->is_CFG()) {
1976 collect_nodes_i(in_rel, this, 1, 1, false, false, false);
1977 collect_nodes_i(out_rel, this, -1, 1, false, false, false);
1978 } else {
1979 if (compact) {
1980 this->collect_nodes(in_rel, 1, false, true);
1981 } else {
1982 this->collect_nodes_in_all_data(in_rel, false);
1983 }
1984 this->collect_nodes(out_rel, -1, false, false);
1985 }
1986 }
1987
//---------------------------collect_nodes-------------------------------------
// An entry point to the low-level node collection facility, to start from a
// given node in the graph. The start node is by default not included in the
// result.
// Arguments:
//   ns:   collect the nodes into this data structure.
//   d:    the depth (distance from start node) to which nodes should be
//         collected. A value >0 indicates input nodes, a value <0, output
//         nodes.
//   ctrl: include only control nodes.
//   data: include only data nodes.
void Node::collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const {
  if (ctrl && data) {
    // ignore nonsensical combination (cannot restrict to both kinds at once)
    return;
  }
  collect_nodes_i(ns, this, d, (uint) ABS(d), false, ctrl, data);
}
2006
//--------------------------collect_nodes_in-----------------------------------
// Helper for collect_nodes_in_all_data/ctrl: collect the full primary (data or
// control) input graph of 'start', optionally adding the secondary boundary
// nodes reachable one step from each collected node.
static void collect_nodes_in(Node* start, GrowableArray<Node*> *ns, bool primary_is_data, bool collect_secondary) {
  // The maximum depth is determined using a BFS that visits all primary (data
  // or control) inputs and increments the depth at each level.
  uint d_in = 0;
  GrowableArray<Node*> nodes(Compile::current()->unique());
  nodes.push(start);
  int nodes_at_current_level = 1;
  int n_idx = 0;  // index of the next unprocessed node in 'nodes'
  while (nodes_at_current_level > 0) {
    // Add all primary inputs reachable from the current level to the list, and
    // increase the depth if there were any.
    int nodes_at_next_level = 0;
    bool nodes_added = false;
    while (nodes_at_current_level > 0) {
      nodes_at_current_level--;
      Node* current = nodes.at(n_idx++);
      for (uint i = 0; i < current->len(); i++) {
        Node* n = current->in(i);
        if (NotANode(n)) {
          continue;
        }
        // Skip inputs of the non-primary kind; they form the boundary.
        if ((primary_is_data && n->is_CFG()) || (!primary_is_data && !n->is_CFG())) {
          continue;
        }
        if (!nodes.contains(n)) {
          nodes.push(n);
          nodes_added = true;
          nodes_at_next_level++;
        }
      }
    }
    if (nodes_added) {
      d_in++;
    }
    nodes_at_current_level = nodes_at_next_level;
  }
  // Re-collect to the measured depth with the generic facility (start excluded).
  start->collect_nodes(ns, d_in, !primary_is_data, primary_is_data);
  if (collect_secondary) {
    // Now, iterate over the secondary nodes in ns and add the respective
    // boundary reachable from them.
    GrowableArray<Node*> sns(Compile::current()->unique());
    for (GrowableArrayIterator<Node*> it = ns->begin(); it != ns->end(); ++it) {
      Node* n = *it;
      n->collect_nodes(&sns, 1, primary_is_data, !primary_is_data);
      for (GrowableArrayIterator<Node*> d = sns.begin(); d != sns.end(); ++d) {
        ns->append_if_missing(*d);
      }
      sns.clear();
    }
  }
}
2059
//---------------------collect_nodes_in_all_data-------------------------------
// Collect the entire data input graph. Include the control boundary if
// requested.
// Arguments:
//   ns:   collect the nodes into this data structure.
//   ctrl: if true, include the control boundary.
void Node::collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const {
  collect_nodes_in((Node*) this, ns, true, ctrl);
}
2069
//--------------------------collect_nodes_in_all_ctrl--------------------------
// Collect the entire control input graph. Include the data boundary if
// requested.
// Arguments:
//   ns:   collect the nodes into this data structure.
//   data: if true, include the data boundary.
void Node::collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const {
  collect_nodes_in((Node*) this, ns, false, data);
}
2078
2079 //------------------collect_nodes_out_all_ctrl_boundary------------------------
2080 // Collect the entire output graph until hitting control node boundaries, and
2081 // include those.
collect_nodes_out_all_ctrl_boundary(GrowableArray<Node * > * ns) const2082 void Node::collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const {
2083 // Perform a BFS and stop at control nodes.
2084 GrowableArray<Node*> nodes(Compile::current()->unique());
2085 nodes.push((Node*) this);
2086 while (nodes.length() > 0) {
2087 Node* current = nodes.pop();
2088 if (NotANode(current)) {
2089 continue;
2090 }
2091 ns->append_if_missing(current);
2092 if (!current->is_CFG()) {
2093 for (DUIterator i = current->outs(); current->has_out(i); i++) {
2094 nodes.push(current->out(i));
2095 }
2096 }
2097 }
2098 ns->remove((Node*) this);
2099 }
2100
// VERIFICATION CODE
// For each input edge to a node (ie - for each Use-Def edge), verify that
// there is a corresponding Def-Use edge.
//------------------------------verify_edges-----------------------------------
void Node::verify_edges(Unique_Node_List &visited) {
  uint i, j, idx;
  int cnt;
  Node *n;

  // Recursive termination test
  if (visited.member(this)) return;
  visited.push(this);

  // Walk over all input edges, checking for correspondence
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if (n != NULL && !n->is_top()) {
      // Count instances of (Node *)this
      cnt = 0;
      for (idx = 0; idx < n->_outcnt; idx++ ) {
        if (n->_out[idx] == (Node *)this) cnt++;
      }
      assert( cnt > 0,"Failed to find Def-Use edge." );
      // Check for duplicate edges
      // walk the input array downcounting the input edges to n
      for( j = 0; j < len(); j++ ) {
        if( in(j) == n ) cnt--;
      }
      // Each use-def edge from this to n must be matched by exactly one
      // def-use edge from n back to this.
      assert( cnt == 0,"Mismatched edge count.");
    } else if (n == NULL) {
      // NULL inputs are only legal for certain node kinds / slot positions.
      assert(i >= req() || i == 0 || is_Region() || is_Phi() || is_ArrayCopy()
              || (is_Unlock() && i == req()-1), "only region, phi, arraycopy or unlock nodes have null data edges");
    } else {
      assert(n->is_top(), "sanity");
      // Nothing to check.
    }
  }
  // Recursive walk over all input edges
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if( n != NULL )
      in(i)->verify_edges(visited);
  }
}
2145
// Verify def-use consistency of 'n' and its transitive inputs, up to
// 'verify_depth' levels (breadth-first).
// Verify all nodes if verify_depth is negative
void Node::verify(Node* n, int verify_depth) {
  assert(verify_depth != 0, "depth should not be 0");
  ResourceMark rm;
  ResourceArea* area = Thread::current()->resource_area();
  // Node indices are not unique across the old and new arenas, so each arena
  // needs its own visited set (see comment below).
  VectorSet old_space(area);
  VectorSet new_space(area);
  Node_List worklist(area);
  worklist.push(n);
  Compile* C = Compile::current();
  uint last_index_on_current_depth = 0;
  verify_depth--; // Visiting the first node on depth 1
  // Only add nodes to worklist if verify_depth is negative (visit all nodes) or greater than 0
  bool add_to_worklist = verify_depth != 0;


  // Process the worklist in order; inputs are appended at the end, so nodes
  // are visited level by level.
  for (uint list_index = 0; list_index < worklist.size(); list_index++) {
    n = worklist[list_index];

    if (n->is_Con() && n->bottom_type() == Type::TOP) {
      if (C->cached_top_node() == NULL) {
        C->set_cached_top_node((Node*)n);
      }
      assert(C->cached_top_node() == n, "TOP node must be unique");
    }

    for (uint i = 0; i < n->len(); i++) {
      Node* x = n->in(i);
      if (!x || x->is_top()) {
        continue;
      }

      // Verify my input has a def-use edge to me
      // Count use-def edges from n to x
      int cnt = 0;
      for (uint j = 0; j < n->len(); j++) {
        if (n->in(j) == x) {
          cnt++;
        }
      }

      // Count def-use edges from x to n
      uint max = x->_outcnt;
      for (uint k = 0; k < max; k++) {
        if (x->_out[k] == n) {
          cnt--;
        }
      }
      assert(cnt == 0, "mismatched def-use edge counts");

      // Contained in new_space or old_space?
      VectorSet* v = C->node_arena()->contains(x) ? &new_space : &old_space;
      // Check for visited in the proper space. Numberings are not unique
      // across spaces so we need a separate VectorSet for each space.
      if (add_to_worklist && !v->test_set(x->_idx)) {
        worklist.push(x);
      }
    }

    if (verify_depth > 0 && list_index == last_index_on_current_depth) {
      // All nodes on this depth were processed and its inputs are on the worklist. Decrement verify_depth and
      // store the current last list index which is the last node in the list with the new depth. All nodes
      // added afterwards will have a new depth again. Stop adding new nodes if depth limit is reached (=0).
      verify_depth--;
      if (verify_depth == 0) {
        add_to_worklist = false;
      }
      last_index_on_current_depth = worklist.size() - 1;
    }
  }
}
2217 #endif
2218
//------------------------------walk-------------------------------------------
// Graph walk, with both pre-order and post-order functions
// Allocates a fresh visited set and delegates to the recursive walk_().
void Node::walk(NFunc pre, NFunc post, void *env) {
  VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
  walk_(pre, post, env, visited);
}
2225
walk_(NFunc pre,NFunc post,void * env,VectorSet & visited)2226 void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
2227 if( visited.test_set(_idx) ) return;
2228 pre(*this,env); // Call the pre-order walk function
2229 for( uint i=0; i<_max; i++ )
2230 if( in(i) ) // Input exists and is not walked?
2231 in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
2232 post(*this,env); // Call the post-order walk function
2233 }
2234
nop(Node &,void *)2235 void Node::nop(Node &, void*) {}
2236
//------------------------------Registers--------------------------------------
// Do we Match on this edge index or not? Generally false for Control
// and true for everything else. Weird for calls & returns.
// Default: nonzero (match) for every slot except 0.
uint Node::match_edge(uint idx) const {
  return idx; // True for other than index 0 (control)
}
2243
static RegMask _not_used_at_all;  // dummy mask returned by the stubs below
// Register classes are defined for specific machines
// These base-class versions must be overridden by machine-specific nodes;
// calling them is always an error.
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
  return _not_used_at_all;
}

const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
  return _not_used_at_all;
}
2255
2256 //=============================================================================
2257 //-----------------------------------------------------------------------------
reset(Arena * new_arena)2258 void Node_Array::reset( Arena *new_arena ) {
2259 _a->Afree(_nodes,_max*sizeof(Node*));
2260 _max = 0;
2261 _nodes = NULL;
2262 _a = new_arena;
2263 }
2264
//------------------------------clear------------------------------------------
// Clear all entries in _nodes to NULL but keep storage
// (single bulk zeroing of the whole backing array).
void Node_Array::clear() {
  Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
}
2270
2271 //-----------------------------------------------------------------------------
grow(uint i)2272 void Node_Array::grow( uint i ) {
2273 if( !_max ) {
2274 _max = 1;
2275 _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
2276 _nodes[0] = NULL;
2277 }
2278 uint old = _max;
2279 while( i >= _max ) _max <<= 1; // Double to fit
2280 _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
2281 Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
2282 }
2283
//-----------------------------------------------------------------------------
// Insert 'n' at slot i, shifting the tail up by one slot.  Grows first when
// the last slot is occupied, since that entry would otherwise be shifted off
// the end.
void Node_Array::insert( uint i, Node *n ) {
  if( _nodes[_max-1] ) grow(_max); // Get more space if full
  Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
  _nodes[i] = n;
}
2290
//-----------------------------------------------------------------------------
// Remove slot i, shifting the tail down by one and NULLing the freed last slot.
void Node_Array::remove( uint i ) {
  Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
  _nodes[_max-1] = NULL;
}
2296
//-----------------------------------------------------------------------------
// Sort the entire backing array (all _max slots, including NULLs) with qsort
// using the caller-supplied comparison function.
void Node_Array::sort( C_sort_func_t func) {
  qsort( _nodes, _max, sizeof( Node* ), func );
}
2301
2302 //-----------------------------------------------------------------------------
dump() const2303 void Node_Array::dump() const {
2304 #ifndef PRODUCT
2305 for( uint i = 0; i < _max; i++ ) {
2306 Node *nn = _nodes[i];
2307 if( nn != NULL ) {
2308 tty->print("%5d--> ",i); nn->dump();
2309 }
2310 }
2311 #endif
2312 }
2313
2314 //--------------------------is_iteratively_computed------------------------------
2315 // Operation appears to be iteratively computed (such as an induction variable)
2316 // It is possible for this operation to return false for a loop-varying
2317 // value, if it appears (by local graph inspection) to be computed by a simple conditional.
is_iteratively_computed()2318 bool Node::is_iteratively_computed() {
2319 if (ideal_reg()) { // does operation have a result register?
2320 for (uint i = 1; i < req(); i++) {
2321 Node* n = in(i);
2322 if (n != NULL && n->is_Phi()) {
2323 for (uint j = 1; j < n->req(); j++) {
2324 if (n->in(j) == this) {
2325 return true;
2326 }
2327 }
2328 }
2329 }
2330 }
2331 return false;
2332 }
2333
2334 //--------------------------find_similar------------------------------
2335 // Return a node with opcode "opc" and same inputs as "this" if one can
2336 // be found; Otherwise return NULL;
find_similar(int opc)2337 Node* Node::find_similar(int opc) {
2338 if (req() >= 2) {
2339 Node* def = in(1);
2340 if (def && def->outcnt() >= 2) {
2341 for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
2342 Node* use = def->fast_out(i);
2343 if (use != this &&
2344 use->Opcode() == opc &&
2345 use->req() == req()) {
2346 uint j;
2347 for (j = 0; j < use->req(); j++) {
2348 if (use->in(j) != in(j)) {
2349 break;
2350 }
2351 }
2352 if (j == use->req()) {
2353 return use;
2354 }
2355 }
2356 }
2357 }
2358 }
2359 return NULL;
2360 }
2361
2362
2363 //--------------------------unique_ctrl_out------------------------------
2364 // Return the unique control out if only one. Null if none or more than one.
unique_ctrl_out() const2365 Node* Node::unique_ctrl_out() const {
2366 Node* found = NULL;
2367 for (uint i = 0; i < outcnt(); i++) {
2368 Node* use = raw_out(i);
2369 if (use->is_CFG() && use != this) {
2370 if (found != NULL) return NULL;
2371 found = use;
2372 }
2373 }
2374 return found;
2375 }
2376
// Constrain this node's placement by 'c': install it as the control input if
// that slot is empty, otherwise (when different) record it as a precedence
// edge.
void Node::ensure_control_or_add_prec(Node* c) {
  if (in(0) == NULL) {
    set_req(0, c);
  } else if (in(0) != c) {
    add_prec(c);
  }
}
2384
// True for phis, projections with no control input, and nodes carrying the
// dead-loop-safe or constant flag -- except projections of an Allocate or of
// a boxing-method call, which are excluded.
bool Node::is_dead_loop_safe() const {
  if (is_Phi()) {
    return true;
  }
  if (is_Proj() && in(0) == NULL) {
    return true;
  }
  if ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0) {
    if (!is_Proj()) {
      return true;
    }
    // Flagged projection: exclude the two unsafe producer kinds.
    if (in(0)->is_Allocate()) {
      return false;
    }
    // MemNode::can_see_stored_value() peeks through the boxing call
    if (in(0)->is_CallStaticJava() && in(0)->as_CallStaticJava()->is_boxing_method()) {
      return false;
    }
    return true;
  }
  return false;
}
2407
2408 //=============================================================================
2409 //------------------------------yank-------------------------------------------
2410 // Find and remove
yank(Node * n)2411 void Node_List::yank( Node *n ) {
2412 uint i;
2413 for( i = 0; i < _cnt; i++ )
2414 if( _nodes[i] == n )
2415 break;
2416
2417 if( i < _cnt )
2418 _nodes[i] = _nodes[--_cnt];
2419 }
2420
2421 //------------------------------dump-------------------------------------------
dump() const2422 void Node_List::dump() const {
2423 #ifndef PRODUCT
2424 for( uint i = 0; i < _cnt; i++ )
2425 if( _nodes[i] ) {
2426 tty->print("%5d--> ",i);
2427 _nodes[i]->dump();
2428 }
2429 #endif
2430 }
2431
dump_simple() const2432 void Node_List::dump_simple() const {
2433 #ifndef PRODUCT
2434 for( uint i = 0; i < _cnt; i++ )
2435 if( _nodes[i] ) {
2436 tty->print(" %d", _nodes[i]->_idx);
2437 } else {
2438 tty->print(" NULL");
2439 }
2440 #endif
2441 }
2442
//=============================================================================
//------------------------------remove-----------------------------------------
// Remove 'n' from the list if present: the hole is filled by the popped last
// element (order not preserved) and the membership bit is cleared.
void Unique_Node_List::remove( Node *n ) {
  if( _in_worklist[n->_idx] ) {
    for( uint i = 0; i < size(); i++ )
      if( _nodes[i] == n ) {
        map(i,Node_List::pop());
        _in_worklist >>= n->_idx;   // clear the membership bit
        return;
      }
    ShouldNotReachHere();  // bit was set but node not found: inconsistent state
  }
}
2456
//-----------------------remove_useless_nodes----------------------------------
// Remove useless nodes from worklist
// (every node whose _idx is not set in 'useful' is dropped).
void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {

  for( uint i = 0; i < size(); ++i ) {
    Node *n = at(i);
    assert( n != NULL, "Did not expect null entries in worklist");
    if( ! useful.test(n->_idx) ) {
      _in_worklist >>= n->_idx;   // clear the membership bit
      map(i,Node_List::pop());    // fill the hole with the popped last element
      // Node *replacement = Node_List::pop();
      // if( i != size() ) { // Check if removing last entry
      //   _nodes[i] = replacement;
      // }
      --i;  // Visit popped node
      // If it was last entry, loop terminates since size() was also reduced
    }
  }
}
2476
2477 //=============================================================================
grow()2478 void Node_Stack::grow() {
2479 size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
2480 size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
2481 size_t max = old_max << 1; // max * 2
2482 _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
2483 _inode_max = _inodes + max;
2484 _inode_top = _inodes + old_top; // restore _top
2485 }
2486
2487 // Node_Stack is used to map nodes.
find(uint idx) const2488 Node* Node_Stack::find(uint idx) const {
2489 uint sz = size();
2490 for (uint i=0; i < sz; i++) {
2491 if (idx == index_at(i) )
2492 return node_at(i);
2493 }
2494 return NULL;
2495 }
2496
//=============================================================================
// Size of this node, used when cloning it.
uint TypeNode::size_of() const { return sizeof(*this); }
2499 #ifndef PRODUCT
dump_spec(outputStream * st) const2500 void TypeNode::dump_spec(outputStream *st) const {
2501 if( !Verbose && !WizardMode ) {
2502 // standard dump does this in Verbose and WizardMode
2503 st->print(" #"); _type->dump_on(st);
2504 }
2505 }
2506
// Compact-dump extra: always print the attached type, prefixed with '#'.
void TypeNode::dump_compact_spec(outputStream *st) const {
  st->print("#");
  _type->dump_on(st);
}
2511 #endif
// Mix the attached type into the hash so value-numbering can distinguish
// TypeNodes that differ only in _type.
uint TypeNode::hash() const {
  return Node::hash() + _type->hash();
}
// Type::cmp() returns 0 for equal types, hence the negation.
uint TypeNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((TypeNode&)n)._type ); }
// Both the static bottom type and the GVN Value are simply the attached type.
const Type *TypeNode::bottom_type() const { return _type; }
const Type* TypeNode::Value(PhaseGVN* phase) const { return _type; }
2519
//------------------------------ideal_reg--------------------------------------
// Ideal register class, derived from the attached type.
uint TypeNode::ideal_reg() const {
  return _type->ideal_reg();
}
2524