/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
//------------------------------make_dtrace_method_entry_exit ----------------
// DTrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type = OptoRuntime::dtrace_method_entry_exit_Type();
  address call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char *call_name = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C) ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform( ConNode::make(C, method_type) );

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
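  // RC_LEAF means the call carries no JVM state and cannot safepoint;
  // RC_NARROW_MEM means it only touches the memory slice named by
  // raw_adr_type, so it is not a barrier for other memory.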
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ null,
  // then the checkcast does nothing.
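  //   For example, "(String) null" always succeeds: checkcast passes a null
  //   operand through without loading or checking the target class.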
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) );

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}
137
138 //------------------------------array_store_check------------------------------
139 // pull array from stack and check that the store is valid
array_store_check()140 void Parse::array_store_check() {
141
142 // Shorthand access to array store elements without popping them.
143 Node *obj = peek(0);
144 Node *idx = peek(1);
145 Node *ary = peek(2);

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
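  // (The klass load uses immutable_memory() because an object's klass field
  // is set at allocation and never changes, so the load cannot be affected
  // by any store this compilation might reorder around it.)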
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop.  Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the cast
  // succeeds.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)
      && !tak->klass_is_exact()
      && tak != TypeKlassPtr::OBJECT) {
    // Regarding the fourth condition in the if-statement above:
    //
    // If the compiler has determined that the type of array 'ary' (represented
    // by 'array_klass') is java/lang/Object, the compiler must not assume that
    // the array 'ary' is monomorphic.
    //
    // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
    // because it is not possible to perform an arraystore into an object that is
    // not a "proper" array.
    //
    // Therefore, let's obtain at runtime the type of 'ary' and check if we can
    // still successfully perform the store.
    //
    // The implementation reasons for the condition are the following:
    //
    // java/lang/Object is the superclass of all arrays, but it is represented by
    // the VM as an InstanceKlass. The checks generated by gen_checkcast() (see
    // below) expect 'array_klass' to be ObjArrayKlass, which can result in
    // invalid memory accesses.
    //
    // See issue JDK-8057622 for details.
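    // (A hedged illustration: this can happen when the only type information
    // available for 'ary', possibly a speculative or profile-derived type,
    // is java/lang/Object, so the store must be checked dynamically instead
    // of being treated as a monomorphic array store.)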

    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)

    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new (C) CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass, con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast(obj, a_e_klass);
}


void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
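  // Both tests below route their failing edge into the 'merge' region,
  // which feeds the single uncommon trap at the end of this method; the
  // passing path falls through to the allocation emitted by the caller.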
  Node* cur_thread = _gvn.transform( new (C) ThreadLocalNode() );
  Node* merge = new (C) RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
  Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
  // can generate code to load it as unsigned byte.
  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
  Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
  tst = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }
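  // A class may be instantiated while it is still being initialized, but
  // only by the thread running its initializer (cf. JLS 12.4.2), so guard
  // the fast-path allocation below for that case.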
  if (klass->is_being_initialized()) {
    emit_guard_for_new(klass);
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                      map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if the limit is reached.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk = _gvn.transform( new (C) CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst = _gvn.transform( new (C) BoolNode( chk, btest) );
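  // Note the unsigned compare (CmpU): execution continues only while
  // cnt < limit; a counter that has wrapped into the negative range reads
  // as a huge unsigned value and therefore also takes the trap below.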
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);
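  // The address being formed is effectively:
  //   mdo + data_offset + dp_to_di(data->dp()) + counter_offset (+ idx * stride)
  // i.e., a constant offset into this method's MethodData, optionally scaled
  // by an index for per-row counters such as ReceiverTypeData rows.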

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new (C) MulXNode( idx, str ) );
    ptr = _gvn.transform( new (C) AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new (C) OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, MemNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below.  These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do.  In the old
  // (c2-based) tiered system we must also execute the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
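      // (For example, with the common C2 defaults CompileThreshold=10000,
      // OnStackReplacePercentage=140 and InterpreterProfilePercentage=33,
      // this gives limit = 10000 * (140 - 33) / 100 = 10700.)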
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {
  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }
}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_CounterData(), "need CounterData for generic call");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();
  // Look to see whether target_bci is already in the table
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // the target_bci is already in the table
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}