/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit-----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
  address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new ThreadLocalNode() );
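  // (ThreadLocalNode materializes the current JavaThread*, which the dtrace
  // entry/exit stubs take as their first argument.)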

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform(ConNode::make(method_type));

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
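  // RC_LEAF: the stub needs no safepoint or JVM state. RC_NARROW_MEM: only
  // the raw memory slice (raw_adr_type) is treated as used/killed by the
  // call, so other memory state stays valid across it.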
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) );

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}

//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check() {

  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);
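  // (For aastore the operand stack is ..., arrayref, index, value, so
  // peek(0) is the value being stored, peek(1) the index, peek(2) the array.)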

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop.  Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the cast
  // succeeds.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)
      && !tak->klass_is_exact()
      && tak != TypeKlassPtr::OBJECT) {
      // Regarding the fourth condition in the if-statement above:
      //
      // If the compiler has determined that the type of array 'ary' (represented
      // by 'array_klass') is java/lang/Object, the compiler must not assume that
      // the array 'ary' is monomorphic.
      //
      // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
      // because it is not possible to perform an arraystore into an object that is not
      // a "proper" array.
      //
      // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
      // successfully perform the store.
      //
      // The implementation reasons for the condition are the following:
      //
      // java/lang/Object is the superclass of all arrays, but it is represented by the VM
      // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
      // 'array_klass' to be an ObjArrayKlass, which can result in invalid memory accesses.
      //
      // See issue JDK-8057622 for details.

    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)

    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
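    // BuildCutout branches on 'bol' with probability PROB_MAX: inside this
    // scope, control is the unlikely (array_klass != con) path, where we
    // deoptimize; when the scope exits, control returns to the passing path.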
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass,con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast(obj, a_e_klass);
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should throw an InstantiationError?
  if (klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none,
                  klass);
    return;
  }
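  // (Rather than generating the throw inline, the trap above deoptimizes;
  // the interpreter re-executes the 'new' and raises the appropriate error,
  // e.g. InstantiationError, itself.)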

  if (C->needs_clinit_barrier(klass, method())) {
    clinit_barrier(klass, method());
    if (stopped())  return;
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                      map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold-------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk   = _gvn.transform( new CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst   = _gvn.transform( new BoolNode( chk, btest) );
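  // In effect the generated shape is:
  //   if (cnt u< limit)  continue normally;
  //   else               uncommon_trap(Reason_age, Action_maybe_recompile);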
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
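  // (The update is a plain, non-atomic load/add/store; occasional lost
  // counts under races are tolerated for profiling counters.)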
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new MulXNode( idx, str ) );
    ptr   = _gvn.transform( new AddPNode( mdo, ptr, scale ) );
  }
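  // Resulting address: (address)md + data_offset + cell_offset + counter_offset
  //                    [+ idx * stride when a per-row index is supplied]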

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. They duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (C2-based) tiered system we must do the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
      int limit = (int)((int64_t)CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage) / 100);
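      // (With the usual product defaults, CompileThreshold=10000,
      // OnStackReplacePercentage=140 and InterpreterProfilePercentage=33,
      // this gives limit = 10700, mirroring the interpreter's OSR trigger.)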
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (int)((int64_t)CompileThreshold * OnStackReplacePercentage / 100);
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data != NULL && data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_CounterData(), "need CounterData for generic call");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
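  // (The stub, profile_receiver_type_C, roughly: search the ReceiverTypeData
  // rows for the receiver's klass and bump the matching row's count; claim an
  // empty row if none matches, else bump the generic count.)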
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Check whether the target_bci is already in the table
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }
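  // At this point row < row_limit() iff target_bci was found; table_full is
  // true iff no row was free (RetData::no_bci marks an unused row).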

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // the target_bci is already in the table
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}