/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"
#include "code/vmreg.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;
}

//------------------------------StartOSRNode----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}
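// Note (explanatory, not from the original header): the single raw-pointer
// parameter above means the OSR entry point receives only the address of the
// OSR migration buffer; the interpreter frame's locals, stack and monitors
// are unpacked from that buffer rather than passed as individual arguments.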

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}

// For a ParmNode, all immediate inputs and outputs are considered relevant
// both in compact and standard representation.
void ParmNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  this->collect_nodes(in_rel, 1, false, false);
  this->collect_nodes(out_rel, -1, false, false);
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st) const {
  // Dump the required inputs, marking the return value with "returns"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st) const {
  // Dump the required inputs, marking the exception input with "exception"
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method) {
  assert(method != NULL, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == NULL ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(NULL) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_map = (SafePointNode*)-1);
  _caller = NULL;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
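// For reference, the offsets set up above partition the owning map's debug
// info into half-open intervals (illustrative summary):
//
//   [locoff, stkoff)  locals
//   [stkoff, monoff)  expression stack (only sp() entries are live)
//   [monoff, scloff)  monitors, two edges per monitor (box, obj)
//   [scloff, endoff)  scalar-replaced objects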

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == NULL)          return true;   // bci is irrelevant
    if (p->_bci    != q->_bci)       return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == NULL) { st->print(" NULL"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf);
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#NULL",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
      break;
    case Type::KlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->klass()));
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}
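// Illustrative output (register names are platform dependent): a local held
// in a register prints as " L[0]=RAX", an integer constant as " L[1]=#42",
// and a scalar-replaced object as " L[2]=#ScObj0".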

//---------------------print_method_with_lineno--------------------------------
void JVMState::print_method_with_lineno(outputStream* st, bool show_name) const {
  if (show_name) _method->print_short_name(st);

  int lineno = _method->line_number_from_bci(_bci);
  if (lineno != -1) {
    st->print(" @ bci:%d (line %d)", _bci, lineno);
  } else {
    st->print(" @ bci:%d", _bci);
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print("        #");
  if (_method) {
    print_method_with_lineno(st, true);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
      } else {
        OptoReg::Name box_reg = BoxLockNode::reg(box);
        st->print(" MON-BOX%d=%s+%d",
                   i,
                   OptoReg::regname(OptoReg::c_frame_pointer),
                   regalloc->reg2offset(box_reg));
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        if (BoxLockNode::box_node(box)->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->cr();
      st->print("        # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = NULL;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != NULL) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != NULL) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != NULL) caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != NULL) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == NULL)  endcn = strchr(name, '(');
        if (endcn == NULL)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    print_method_with_lineno(st, !printed);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != NULL)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                  ((caller() == NULL) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != NULL && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != NULL) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
             depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == NULL) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print("    bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p != NULL; p = p->_caller) {
    p->set_map(map);
  }
}

// Adapt offsets in in-array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}
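// Illustrative example (hypothetical offsets): after one edge is inserted
// ahead of the debug info, adapt_position(1) turns locoff=4, stkoff=9,
// monoff=12 into locoff=5, stkoff=10, monoff=13, for this state and for
// every caller state up the chain.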

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != NULL) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}
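// The loop above walks from the youngest (most deeply inlined) frame out to
// the root caller: each iteration remembers the current method's parameter
// and local counts so the next, older frame can account for the overlap with
// its callee, mirroring the frame layout the deopt code reconstructs.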

//=============================================================================
bool CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != NULL)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  // Use the standard compiler calling convention
  SharedRuntime::java_calling_convention(sig_bt, parm_regs, argcnt);
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    OptoRegPair regs = is_CallRuntime()
      ? match->c_return_value(ideal_reg)  // Calls into C runtime
      : match->  return_value(ideal_reg); // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return NULL;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
  assert((t_oop != NULL), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain();
    Node* dest = NULL;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != NULL, "Call had only one ptr in, broken IR!");
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != NULL) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
      if (proj != NULL) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                 (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                 (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node *n) {
  const TypeTuple * d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call
// or 'this' if there are several CheckCastPP or unexpected uses
// or returns NULL if there is no one.
Node *CallNode::result_cast() {
  Node *cast = NULL;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == NULL)
    return NULL;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != NULL) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}


void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
  projs->fallthrough_proj      = NULL;
  projs->fallthrough_catchproj = NULL;
  projs->fallthrough_ioproj    = NULL;
  projs->catchall_ioproj       = NULL;
  projs->catchall_catchproj    = NULL;
  projs->fallthrough_memproj   = NULL;
  projs->catchall_memproj      = NULL;
  projs->resproj               = NULL;
  projs->exobj                 = NULL;

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        projs->fallthrough_proj = pn;
        const Node *cn = pn->unique_ctrl_out();
        if (cn != NULL && cn->is_Catch()) {
          ProjNode *cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              projs->fallthrough_catchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              projs->catchall_catchproj = cpn;
            }
          }
        }
        break;
      }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == NULL, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception but all the others must exist and be found.
  assert(projs->fallthrough_proj != NULL, "must be found");
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
  }
}
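// A minimal usage sketch (illustrative; actual callers and flags vary):
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /*separate_io_proj*/, false /*do_asserts*/);
//   if (projs.fallthrough_catchproj != NULL) {
//     // rewire users of the normal control path, etc.
//   }
//
// NULL entries in the filled-in struct mean the corresponding projection is
// absent (e.g. an ignored result or a swallowed exception path).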

Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
#ifdef ASSERT
  // Validate attached generator
  CallGenerator* cg = generator();
  if (cg != NULL) {
    assert(is_CallStaticJava()  && cg->is_mh_late_inline() ||
           is_CallDynamicJava() && cg->is_virtual_late_inline(), "mismatch");
  }
#endif // ASSERT
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != NULL && strstr(_name, "arraycopy") != 0) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = sfpt->is_Call() ? sfpt->as_Call()->tf()->domain()->cnt() : (uint)TypeFunc::Parms+1;
  uint new_dbg_start = tf()->domain()->cnt();
  int jvms_adj  = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < sfpt->req(); i++) {
    Node* old_in = sfpt->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != NULL && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      bool new_node;
      Node* new_in = old_sosn->clone(sosn_map, new_node);
      if (new_node) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(sfpt->jvms() != NULL ? sfpt->jvms()->clone_deep(C) : NULL);
  for (JVMState *jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}

#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == NULL) {
    return true; // call into runtime or uncommon trap
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(_bci);
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream* st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL) {
    assert(IncrementalInlineMH, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_mh_late_inline(), "not virtual");

    // Check whether this MH handle call becomes a candidate for inlining.
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return extract_uncommon_trap_request(this);
  }
  return 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != NULL &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}
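// The extracted request code is the ConI constant passed to the uncommon
// trap stub; it packs a Deoptimization::DeoptReason and DeoptAction, which
// dump_spec() below renders in readable form via
// Deoptimization::format_trap_request().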

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != NULL) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                 Deoptimization::format_trap_request(buf, sizeof(buf),
                                                     trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
bool CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}

Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL) {
    assert(IncrementalInlineVirtual, "required");
    assert(cg->call_node() == this, "mismatch");
    assert(cg->is_virtual_late_inline(), "not virtual");

    // Recover symbolic info for method resolution.
    ciMethod* caller = jvms()->method();
    ciBytecodeStream iter(caller);
    iter.force_bci(jvms()->bci());

    bool             not_used1;
    ciSignature*     not_used2;
    ciMethod*        orig_callee = iter.get_method(not_used1, &not_used2);  // callee in the bytecode
    ciKlass*         holder      = iter.get_declared_method_holder();
    if (orig_callee->is_method_handle_intrinsic()) {
      assert(_override_symbolic_info, "required");
      orig_callee = method();
      holder = method()->holder();
    }

    ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

    Node* receiver_node = in(TypeFunc::Parms);
    const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();

    int  not_used3;
    bool call_does_dispatch;
    ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
                                                       call_does_dispatch, not_used3);  // out-parameters
    if (!call_does_dispatch) {
      // Register for late inlining.
      cg->set_callee_method(callee);
      phase->C->prepend_late_inline(cg); // MH late inlining prepends to the list, so do the same
      set_generator(NULL);
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}

#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallNativeNode::size_of() const { return sizeof(*this); }
bool CallNativeNode::cmp( const Node &n ) const {
  CallNativeNode &call = (CallNativeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name)
    && _arg_regs == call._arg_regs && _ret_regs == call._ret_regs;
}
Node* CallNativeNode::match(const ProjNode *proj, const Matcher *matcher) {
  switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::I_O:
    case TypeFunc::Memory:
      return new MachProjNode(this, proj->_con, RegMask::Empty, MachProjNode::unmatched_proj);
    case TypeFunc::ReturnAdr:
    case TypeFunc::FramePtr:
      ShouldNotReachHere();
    case TypeFunc::Parms: {
      const Type* field_at_con = tf()->range()->field_at(proj->_con);
      const BasicType bt = field_at_con->basic_type();
      OptoReg::Name optoreg = OptoReg::as_OptoReg(_ret_regs.at(proj->_con - TypeFunc::Parms));
      OptoRegPair regs;
      if (bt == T_DOUBLE || bt == T_LONG) {
        regs.set2(optoreg);
      } else {
        regs.set1(optoreg);
      }
      RegMask rm = RegMask(regs.first());
      if(OptoReg::is_valid(regs.second()))
        rm.Insert(regs.second());
      return new MachProjNode(this, proj->_con, rm, field_at_con->ideal_reg());
    }
    case TypeFunc::Parms + 1: {
      assert(tf()->range()->field_at(proj->_con) == Type::HALF, "Expected HALF");
      assert(_ret_regs.at(proj->_con - TypeFunc::Parms) == VMRegImpl::Bad(), "Unexpected register for Type::HALF");
      // 2nd half of doubles and longs
      return new MachProjNode(this, proj->_con, RegMask::Empty, (uint) OptoReg::Bad);
    }
    default:
      ShouldNotReachHere();
  }
  return NULL;
}
#ifndef PRODUCT
void CallNativeNode::print_regs(const GrowableArray<VMReg>& regs, outputStream* st) {
  st->print("{ ");
  for (int i = 0; i < regs.length(); i++) {
    regs.at(i)->print_on(st);
    if (i < regs.length() - 1) {
      st->print(", ");
    }
  }
  st->print(" } ");
}

void CallNativeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s ", _name);
  st->print("_arg_regs: ");
  print_regs(_arg_regs, st);
  st->print("_ret_regs: ");
  print_regs(_ret_regs, st);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, /*regs2=*/nullptr, argcnt);
}

void CallNativeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  assert((tf()->domain()->cnt() - TypeFunc::Parms) == argcnt, "arg counts must match!");
#ifdef ASSERT
  for (uint i = 0; i < argcnt; i++) {
    assert(tf()->domain()->field_at(TypeFunc::Parms + i)->basic_type() == sig_bt[i], "types must match!");
  }
#endif
  for (uint i = 0; i < argcnt; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
      case T_FLOAT:
        parm_regs[i].set1(_arg_regs.at(i));
        break;
      case T_LONG:
      case T_DOUBLE:
        assert((i + 1) < argcnt && sig_bt[i + 1] == T_VOID, "expecting half");
        parm_regs[i].set2(_arg_regs.at(i));
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        assert(_arg_regs.at(i) == VMRegImpl::Bad(), "expecting bad reg");
        parm_regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }
}
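// Illustrative example: a native signature (jlong, jint) arrives here as
// sig_bt = { T_LONG, T_VOID, T_INT }. The T_LONG entry claims a register
// pair via set2(), while the T_VOID slot (the second half of the long) is
// marked bad, matching the asserts above.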

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
bool SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != NULL)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}
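// Together, set_next_exception()/next_exception() thread pending exception
// states through the single precedence slot at req(): each SafePointNode
// points at the next one, so the exception states form a singly linked list
// hanging off the map (the same list JVMState::dump_on() walks when printing).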


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  // Transforming long counted loops requires a safepoint node. Do not
  // eliminate a safepoint until loop opts are over.
  if (in(0)->is_Proj() && !phase->C->major_progress()) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except a leaf call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode.
      // If the loop dies, they will be removed together.
      if (has_out_with(Op_OuterStripMinedLoopEnd)) {
        return this;
      }
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type* SafePointNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  if (in(0) == this) {
    return Type::TOP; // Dead infinite loop
  }
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}

// The related nodes of a SafepointNode are all data inputs, excluding the
// control boundary, as well as all outputs till level 2 (to include projection
// nodes and targets). In compact mode, just include inputs till level 1 and
// outputs as before.
void SafePointNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  if (compact) {
    this->collect_nodes(in_rel, 1, false, false);
  } else {
    this->collect_nodes_in_all_data(in_rel, false);
  }
  this->collect_nodes(out_rel, -2, false, false);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms )  return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    ins_req(nextmon,   lock->box_node());
    ins_req(nextmon+1, lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    ins_req(nextmon, top);
    ins_req(nextmon, top);
  }
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}
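// After push_monitor(), monitor i occupies the two debug edges at
// monoff + 2*i (the BoxLockNode, i.e. the monitor's stack slot) and
// monoff + 2*i + 1 (the locked object); that is the layout which
// monitor_box()/monitor_obj() read back and pop_monitor() undoes.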

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff) del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  return (TypeFunc::Parms == idx);
}

void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->C->root()->rm_prec(nb);
  }
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _first_index(first_index),
  _n_fields(n_fields)
#ifdef ASSERT
  , _alloc(alloc)
#endif
{
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != NULL) {
    new_node = false;
    return (SafePointScalarObjectNode*)cached;
  }
  new_node = true;
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}


#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(),
            first_index() + n_fields() - 1);
}

#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  C->add_macro_node(this);
}

void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != NULL &&
         initializer->is_initializer() &&
         !initializer->is_static(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == NULL) {
    return;
  }

  // Allocation node is first parameter in its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}

Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
  Node* mark_node = NULL;
  // For now only enable fast locking for non-array types
  if (UseBiasedLocking && Opcode() == Op_Allocate) {
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  return mark_node;
}
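
// Behavior sketch (illustrative, matching the branches above): with
// -XX:+UseBiasedLocking a plain object allocation loads the prototype
// mark word from its Klass, because the bias state is tracked per klass
// and may change at run time; array allocations, and all allocations when
// biased locking is off, use the compile-time constant
// markWord::prototype() instead.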

//=============================================================================
Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  const Type* type = phase->type(Ideal_length());
  if (type->isa_int() && type->is_int()->_hi < 0) {
    if (can_reshape) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Unreachable fall through path (negative array length),
      // the allocation can only throw so disconnect it.
      Node* proj = proj_out_or_null(TypeFunc::Control);
      Node* catchproj = NULL;
      if (proj != NULL) {
        for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
          Node *cn = proj->fast_out(i);
          if (cn->is_Catch()) {
            catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index);
            break;
          }
        }
      }
      if (catchproj != NULL && catchproj->outcnt() > 0 &&
          (catchproj->outcnt() > 1 ||
           catchproj->unique_out()->Opcode() != Op_Halt)) {
        assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
        Node* nproj = catchproj->clone();
        igvn->register_new_node_with_optimizer(nproj);

        Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr );
        frame = phase->transform(frame);
        // Halt & Catch Fire
        Node* halt = new HaltNode(nproj, frame, "unexpected negative array length");
        phase->C->root()->add_req(halt);
        phase->transform(halt);

        igvn->replace_node(catchproj, phase->C->top());
        return this;
      }
    } else {
      // Can't correct it during regular GVN so register for IGVN
      phase->C->record_for_igvn(this);
    }
  }
  return NULL;
}

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate. If we are not allowed to create new nodes, and
// a CastII is appropriate, return NULL.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != NULL, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != NULL && length_type != NULL) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             length_type->is_con() && narrow_length_type->is_con() &&
               (narrow_length_type->_hi <= length_type->_lo) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return NULL if new nodes are not allowed
      if (!allow_new_nodes) return NULL;
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      InitializeNode* init = initialization();
      assert(init != NULL, "initialization not found");
      length = new CastIINode(length, narrow_length_type);
      length->set_req(0, init->proj_out_or_null(0));
    }
  }

  return length;
}
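
// As a worked (illustrative) example: for "a = new int[n]" where nothing
// is known statically about n beyond its int type, narrow_size_type() can
// tighten the length type to roughly int:[0..max_array_length], since a
// successfully initialized array cannot have a negative length; the CastII
// carrying the narrowed type is pinned on the Initialize projection so the
// stronger fact only holds on the path where the allocation succeeded.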

//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant, meaning that we don't introduce new lock operations on
// some paths in order to be able to eliminate them on others, as in
// PRE.  This would probably require some more extensive graph
// manipulation to guarantee that the memory edges were all handled
// correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method, consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and lock before
// giving up on eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination, computing the
// availability of unlocking and the anticipatability of locking at a
// program point, would allow detection of fully redundant locking
// with some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//              <obj>
//                |  \\------+
//                |   \       \
//                |  BoxLock   \
//                |   |   |     \
//                |   |    \     \
//                |   |   FastLock
//                |   |   /
//                |   |  /
//                |   |  |
//
//               Lock
//                |
//               Proj #0
//                |
//               MembarAcquire
//                |
//               Proj #0
//
//               MembarRelease
//                |
//               Proj #0
//                |
//               Unlock
//                |
//               Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate they are marked as eliminatable, which causes the
// expansion of the Lock and Unlock macro nodes to make the operation a NOP.
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//   - copy regions.  (These may not have been optimized away yet.)
//   - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == NULL)
    return NULL;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == NULL)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock which
// is operating on the same object as the lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
      Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
    Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
    if (lock_obj->eqv_uncast(unlock_obj) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
        Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node());
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

void AbstractLockNode::dump_spec(outputStream* st) const {
  st->print("%s ", _kind_names[_kind]);
  CallNode::dump_spec(st);
}

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}

// The related set of lock nodes includes the control boundary.
void AbstractLockNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  if (compact) {
    this->collect_nodes(in_rel, 1, false, false);
  } else {
    this->collect_nodes_in_all_data(in_rel, true);
  }
  this->collect_nodes(out_rel, -2, false, false);
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL) return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return NULL;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking an unescaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock comes from either half of an if: the
        // predecessor merges unlocks, and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(NULL);
}

// c is used for access to the compilation log; no logging if NULL
bool LockNode::is_nested_lock_region(Compile * c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false; // External lock or it is not Box (Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  if (!box->is_simple_lock_region(&unique_lock, obj)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
#endif
    return false;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  obj = bs->step_over_gc_barrier(obj);
  // Look for external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      obj_node = bs->step_over_gc_barrier(obj_node);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}
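
// The canonical shape this recognizes is an inner synchronized region on
// the same object (illustrative Java source, not code in this file):
//
//   synchronized (o) {      // outer lock: older (smaller) BoxLock stack slot
//     ...
//     synchronized (o) {    // inner lock: provably nested, may be eliminated
//       ...
//     }
//   }
//
// The JVMState walk above looks for such an outer monitor on the same
// object at a smaller stack slot; finding one proves the inner lock is
// nested.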

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL) return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) return NULL;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

const char * AbstractLockNode::kind_as_string() const {
  return is_coarsened()   ? "coarsened" :
         is_nested()      ? "nested" :
         is_non_esc_obj() ? "non_escaping" :
         "?";
}

void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag) const {
  if (C == NULL) {
    return;
  }
  CompileLog* log = C->log();
  if (log != NULL) {
    log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'",
          tag, is_Lock(), C->compile_id(),
          is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
          kind_as_string());
    log->stamp();
    log->end_head();
    JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail(tag);
  }
}

bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr *t_oop, PhaseTransform *phase) {
  if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
    return dest_t->instance_id() == t_oop->instance_id();
  }

  if (dest_t->isa_instptr() && !dest_t->klass()->equals(phase->C->env()->Object_klass())) {
    // clone
    if (t_oop->isa_aryptr()) {
      return false;
    }
    if (!t_oop->isa_instptr()) {
      return true;
    }
    if (dest_t->klass()->is_subtype_of(t_oop->klass()) || t_oop->klass()->is_subtype_of(dest_t->klass())) {
      return true;
    }
    // unrelated
    return false;
  }

  if (dest_t->isa_aryptr()) {
    // arraycopy or array clone
    if (t_oop->isa_instptr()) {
      return false;
    }
    if (!t_oop->isa_aryptr()) {
      return true;
    }

    const Type* elem = dest_t->is_aryptr()->elem();
    if (elem == Type::BOTTOM) {
      // An array but we don't know what elements are
      return true;
    }

    dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);

    return dest_alias == t_oop_alias;
  }

  return true;
}
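
// A hedged reading of the cases above (the types are illustrative): cloning
// a known non-Object instance such as a String can never modify int[]
// memory, so an array t_oop returns false in the clone branch; for an
// arraycopy between arrays with known element types the decision reduces
// to comparing alias indices, so an int[] destination may modify int-array
// memory but not byte-array memory.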