/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
  : _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, NULL)),
    _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
}

int ShenandoahBarrierSetC2State::iu_barriers_count() const {
  return _iu_barriers->length();
}

ShenandoahIUBarrierNode* ShenandoahBarrierSetC2State::iu_barrier(int idx) const {
  return _iu_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_iu_barrier(ShenandoahIUBarrierNode* n) {
  assert(!_iu_barriers->contains(n), "duplicate entry in barrier list");
  _iu_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_iu_barrier(ShenandoahIUBarrierNode* n) {
  if (_iu_barriers->contains(n)) {
    _iu_barriers->remove(n);
  }
}

int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
  return _load_reference_barriers->length();
}

ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
  return _load_reference_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
  _load_reference_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n) {
  if (_load_reference_barriers->contains(n)) {
    _load_reference_barriers->remove(n);
  }
}

Node* ShenandoahBarrierSetC2::shenandoah_iu_barrier(GraphKit* kit, Node* obj) const {
  if (ShenandoahIUBarrier) {
    return kit->gvn().transform(new ShenandoahIUBarrierNode(obj));
  }
  return obj;
}

#define __ kit->

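// Decide whether the SATB pre-barrier for a store can be elided entirely.
// The reasoning (a sketch, mirroring the ReduceInitialCardMarks-style logic
// under which this is called): if the store targets a slot of an object
// allocated in this compilation unit, and no earlier store to the same slot
// could have published a non-null reference, then the previous value is
// provably null and there is nothing worth logging. The bounded walk over
// the memory graph below tries to establish exactly that.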
bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
                                                         BasicType bt, uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = __ memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to be non-null here by the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up to directly write a reference
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

#undef __
#define __ ideal.

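// Emit the SATB pre-barrier. Roughly, the emitted IR implements this
// pseudocode (a sketch of intent, not the literal node shapes; the SATB
// index is maintained as a byte offset into the buffer):
//
//   if (thread->gc_state & MARKING) {
//     pre_val = do_load ? *adr : pre_val;            // previous field value
//     if (pre_val != NULL) {
//       if (thread->satb_index != 0) {
//         thread->satb_index -= sizeof(intptr_t);    // enqueue locally
//         thread->satb_buffer[thread->satb_index] = pre_val;
//       } else {
//         shenandoah_wb_pre(pre_val, thread);        // buffer full: runtime call
//       }
//     }
//   }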
void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
                                                    bool do_load,
                                                    Node* obj,
                                                    Node* adr,
                                                    uint alias_idx,
                                                    Node* val,
                                                    const TypeOopPtr* val_type,
                                                    Node* pre_val,
                                                    BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (ReduceInitialCardMarks
        && satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);

  // Offsets into the thread
  const int index_offset   = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking;
  Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
  Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
  marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
  assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

  // if (marking)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);

  if (ShenandoahSATBBarrier && adr != NULL) {
    Node* c = kit->control();
    Node* call = c->in(1)->in(1)->in(1)->in(0);
    assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
    call->add_req(adr);
  }
}

bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {
  return call->is_CallLeaf() &&
         call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry);
}

bool ShenandoahBarrierSetC2::is_shenandoah_lrb_call(Node* call) {
  if (!call->is_CallLeaf()) {
    return false;
  }

  address entry_point = call->as_CallLeaf()->entry_point();
  return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)) ||
         (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
}

bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
  if (n->Opcode() != Op_If) {
    return false;
  }

  Node* bol = n->in(1);
  assert(bol->is_Bool(), "");
  Node* cmpx = bol->in(1);
  if (bol->as_Bool()->_test._test == BoolTest::ne &&
      cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) &&
      is_shenandoah_state_load(cmpx->in(1)->in(1)) &&
      cmpx->in(1)->in(2)->is_Con() &&
      cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) {
    return true;
  }

  return false;
}

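// Recognize a load of the thread-local gc_state byte: a Load whose address is
// AddP(ThreadLocal, #gc_state_offset). This is the shape produced by the
// barrier emission above, and the pattern matchers here rely on it.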
bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) {
  if (!n->is_Load()) return false;
  const int state_offset = in_bytes(ShenandoahThreadLocalData::gc_state_offset());
  return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal
         && n->in(2)->in(3)->is_Con()
         && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset;
}

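// Thin wrapper around satb_write_barrier_pre() that brackets the emission
// with IdealKit sync_kit/final_sync, so GraphKit and IdealKit agree on
// control and memory state before and after.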
void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
                                                          bool do_load,
                                                          Node* obj,
                                                          Node* adr,
                                                          uint alias_idx,
                                                          Node* val,
                                                          const TypeOopPtr* val_type,
                                                          Node* pre_val,
                                                          BasicType bt) const {
  if (ShenandoahSATBBarrier) {
    IdealKit ideal(kit);
    kit->sync_kit(ideal);

    satb_write_barrier_pre(kit, do_load, obj, adr, alias_idx, val, val_type, pre_val, bt);

    ideal.sync_kit(kit);
    kit->final_sync(ideal);
  }
}

// Helper that guards and inserts a pre-barrier.
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                                Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when SATB
  // marking is active, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for a non-volatile load from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the reference_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::_reference_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
      // Update GraphKit memory and control from IdealKit.
      kit->sync_kit(ideal);

      Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
      Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

      // Update IdealKit memory and control from GraphKit.
      __ sync_kit(kit);

      Node* one = __ ConI(1);
      // is_instof == 0 if base_oop == NULL
      __ if_then(is_instof, BoolTest::eq, one, unlikely); {

        // Update GraphKit from IdealKit.
        kit->sync_kit(ideal);

        // Use the pre-barrier to record the value in the referent field
        satb_write_barrier_pre(kit, false /* do_load */,
                               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                               pre_val /* pre_val */,
                               T_OBJECT);
        if (need_mem_bar) {
          // Add memory barrier to prevent commoning reads from this field
          // across safepoint since GC can change its value.
          kit->insert_mem_bar(Op_MemBarCPUOrder);
        }
        // Update IdealKit from GraphKit.
        __ sync_kit(kit);

      } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

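// Calling conventions for the Shenandoah runtime entry points used by the
// barriers above and below: the pre-barrier stub takes (pre_val, thread),
// the clone barrier takes the source oop, and the load-reference barrier
// takes the loaded oop plus its load address and returns the (possibly
// forwarded) oop.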
const TypeFunc* ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // original load address

  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_heap = (decorators & IN_HEAP) != 0;

  if (!access.is_oop() || (!on_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  GraphKit* kit = access.kit();

  uint adr_idx = kit->C->get_alias_index(adr_type);
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
  Node* value = val.node();
  value = shenandoah_iu_barrier(kit, value);
  val.set_node(value);
  shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
                               static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
  return BarrierSetC2::store_at_resolved(access, val);
}

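// Loads may need up to three layers of barrier logic, applied in the order
// below: the plain load itself, a load-reference barrier (LRB) that resolves
// a potentially forwarded oop, and, for Reference.referent-style accesses, a
// keep-alive SATB barrier that logs the loaded value so concurrent marking
// does not lose it.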
Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    return BarrierSetC2::load_at_resolved(access, val_type);
  }

  Node* load = BarrierSetC2::load_at_resolved(access, val_type);
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();

  // 2: apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    load = new ShenandoahLoadReferenceBarrierNode(NULL, load);
    load = access.kit()->gvn().transform(load);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    Node* top = Compile::current()->top();
    Node* adr = access.addr().node();
    Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
    Node* obj = access.base();

    bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
    bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
    bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;

    // If we are reading the value of the referent field of a Reference
    // object (either by using Unsafe directly or through reflection)
    // then, if SATB is enabled, we need to record the referent in an
    // SATB log buffer using the pre-barrier mechanism.
    // Also we need to add memory barrier to prevent commoning reads
    // from this field across safepoint since GC can change its value.
    if (!on_weak_ref || (unknown && (offset == top || obj == top)) || !keep_alive) {
      return load;
    }
    GraphKit* kit = access.kit();
    bool mismatched = (decorators & C2_MISMATCHED) != 0;
    bool is_unordered = (decorators & MO_UNORDERED) != 0;
    bool need_cpu_mem_bar = !is_unordered || mismatched;

    if (on_weak_ref) {
      // Use the pre-barrier to record the value in the referent field
      satb_write_barrier_pre(kit, false /* do_load */,
                             NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                             load /* pre_val */, T_OBJECT);
      // Add memory barrier to prevent commoning reads from this field
      // across safepoint since GC can change its value.
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    } else if (unknown) {
      // We do not require a mem bar inside pre_barrier if need_mem_bar
      // is set: the barriers would be emitted by us.
      insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
    }
  }

  return load;
}

static void pin_atomic_op(C2AtomicAccess& access) {
  if (!access.needs_pinning()) {
    return;
  }
  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  GraphKit* kit = access.kit();
  Node* load_store = access.raw_access();
  assert(load_store != NULL, "must pin atomic op");
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, access.alias_idx());
}

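// An oop CAS needs three pieces of GC glue: the new value gets an IU barrier,
// the expected value doubles as the SATB pre-value (it equals the old field
// contents when the CAS succeeds), and the returned oop is routed through a
// load-reference barrier. Under ShenandoahCASBarrier the plain
// CompareAndExchange nodes are replaced with Shenandoah-specific variants
// that also cope with to-space forwarding of the compared oops.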
Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                   Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_iu_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);

    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    Node* adr = access.addr().node();
    const TypePtr* adr_type = access.addr().type();
    Node* load_store = NULL;

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        load_store = kit->gvn().transform(new ShenandoahCompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
      }
    }

    access.set_raw_access(load_store);
    pin_atomic_op(access);

#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
    }
#endif
    load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store));
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                              Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    new_val = shenandoah_iu_barrier(kit, new_val);
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 expected_val /* pre_val */, T_OBJECT);
    DecoratorSet decorators = access.decorators();
    MemNode::MemOrd mo = access.mem_node_mo();
    Node* mem = access.memory();
    bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
    Node* load_store = NULL;
    Node* adr = access.addr().node();
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
        }
      }
    } else
#endif
    {
      if (ShenandoahCASBarrier) {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new ShenandoahWeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new ShenandoahCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      } else {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
      }
    }
    access.set_raw_access(load_store);
    pin_atomic_op(access);
    return load_store;
  }
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  if (access.is_oop()) {
    val = shenandoah_iu_barrier(kit, val);
  }
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
  if (access.is_oop()) {
    result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result));
    shenandoah_write_barrier_pre(kit, false /* do_load */,
                                 NULL, NULL, max_juint, NULL, NULL,
                                 result /* pre_val */, T_OBJECT);
  }
  return result;
}

// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true;
  if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "shenandoah_clone_barrier") == 0 ||
         strcmp(call->_name, "shenandoah_cas_obj") == 0 ||
         strcmp(call->_name, "shenandoah_wb_pre") == 0;
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (c == NULL) {
    return c;
  }
  if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
  }
  if (c->Opcode() == Op_ShenandoahIUBarrier) {
    c = c->in(1);
  }
  return c;
}

bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
  return !ShenandoahBarrierC2Support::expand(C, igvn);
}

bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
  if (mode == LoopOptsShenandoahExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    ShenandoahBarrierC2Support::pin_and_expand(phase);
    return true;
  } else if (mode == LoopOptsShenandoahPostExpand) {
    assert(UseShenandoahGC, "only for shenandoah");
    visited.Clear();
    ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
    return true;
  }
  return false;
}

bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(BasicType type) const {
  return false;
}

bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) {
  const TypeOopPtr* src_type = gvn.type(src)->is_oopptr();
  if (src_type->isa_instptr() != NULL) {
    ciInstanceKlass* ik = src_type->klass()->as_instance_klass();
    if ((src_type->klass_is_exact() || (!ik->is_interface() && !ik->has_subklass())) && !ik->has_injected_fields()) {
      if (ik->has_object_fields()) {
        return true;
      } else {
        if (!src_type->klass_is_exact()) {
          Compile::current()->dependencies()->assert_leaf_type(ik);
        }
      }
    } else {
      return true;
    }
  } else if (src_type->isa_aryptr()) {
    BasicType src_elem  = src_type->klass()->as_array_klass()->element_type()->basic_type();
    if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
      return true;
    }
  } else {
    return true;
  }
  return false;
}

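// Expand an Object.clone() arraycopy with a conditional fixup. A sketch of
// the emitted shape (the MARKING bit is only tested when ShenandoahIUBarrier
// is on):
//
//   if (thread->gc_state & (HAS_FORWARDED | MARKING)) {
//     shenandoah_clone_barrier(src);    // fix up source references first
//   }
//   fast_arraycopy(src, dest, length);  // then copy the payload as raw words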
void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);
  assert (src_offset == NULL && dest_offset == NULL, "for clone offsets should be null");
  assert (src->is_AddP(), "for clone the src should be the interior ptr");
  assert (dest->is_AddP(), "for clone the dst should be the interior ptr");

  if (ShenandoahCloneBarrier && clone_needs_barrier(src, phase->igvn())) {
    // Check whether the heap has forwarded objects. If it does, we need to call into the special
    // routine that fixes up source references before we can continue.

    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* mem_phi = new PhiNode(region, Type::MEMORY, TypeRawPtr::BOTTOM);

    Node* thread = phase->transform_later(new ThreadLocalNode());
    Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));

    uint gc_state_idx = Compile::AliasIdxRaw;
    const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
    debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

    Node* gc_state    = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
    int flags = ShenandoahHeap::HAS_FORWARDED;
    if (ShenandoahIUBarrier) {
      flags |= ShenandoahHeap::MARKING;
    }
    Node* stable_and  = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(flags)));
    Node* stable_cmp  = phase->transform_later(new CmpINode(stable_and, phase->igvn().zerocon(T_INT)));
    Node* stable_test = phase->transform_later(new BoolNode(stable_cmp, BoolTest::ne));

    IfNode* stable_iff  = phase->transform_later(new IfNode(ctrl, stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN))->as_If();
    Node* stable_ctrl   = phase->transform_later(new IfFalseNode(stable_iff));
    Node* unstable_ctrl = phase->transform_later(new IfTrueNode(stable_iff));

    // Heap is stable, no need to do anything additional
    region->init_req(_heap_stable, stable_ctrl);
    mem_phi->init_req(_heap_stable, mem);

    // Heap is unstable, call into clone barrier stub
    Node* call = phase->make_leaf_call(unstable_ctrl, mem,
                    ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(),
                    CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier),
                    "shenandoah_clone",
                    TypeRawPtr::BOTTOM,
                    src->in(AddPNode::Base));
    call = phase->transform_later(call);

    ctrl = phase->transform_later(new ProjNode(call, TypeFunc::Control));
    mem = phase->transform_later(new ProjNode(call, TypeFunc::Memory));
    region->init_req(_heap_unstable, ctrl);
    mem_phi->init_req(_heap_unstable, mem);

    // Wire up the actual arraycopy stub now
    ctrl = phase->transform_later(region);
    mem = phase->transform_later(mem_phi);

    const char* name = "arraycopy";
    call = phase->make_leaf_call(ctrl, mem,
                                 OptoRuntime::fast_arraycopy_Type(),
                                 phase->basictype2arraycopy(T_LONG, NULL, NULL, true, name, true),
                                 name, TypeRawPtr::BOTTOM,
                                 src, dest, length
                                 LP64_ONLY(COMMA phase->top()));
    call = phase->transform_later(call);

    // Hook up the whole thing into the graph
    phase->igvn().replace_node(ac, call);
  } else {
    BarrierSetC2::clone_at_expansion(phase, ac);
  }
}


// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahIUBarrier) {
    state()->add_iu_barrier((ShenandoahIUBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
  if (node->Opcode() == Op_ShenandoahIUBarrier) {
    state()->remove_iu_barrier((ShenandoahIUBarrierNode*) node);
  }
  if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
    state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
  }
}

void ShenandoahBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* n) const {
  if (is_shenandoah_wb_pre_call(n)) {
    shenandoah_eliminate_wb_pre(n, &macro->igvn());
  }
}

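// Remove a pre-barrier runtime call that has become redundant (e.g. because
// the allocation it guarded was scalar-replaced). The walk below retraces the
// control shape produced by satb_write_barrier_pre: the diamonds below the
// call merge into two-way regions, above which sits the marking test.
// Constant-folding that test's comparison makes the whole slow path go away;
// the trailing input (the address added after emission) is dropped as well.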
void ShenandoahBarrierSetC2::shenandoah_eliminate_wb_pre(Node* call, PhaseIterGVN* igvn) const {
  assert(UseShenandoahGC && is_shenandoah_wb_pre_call(call), "");
  Node* c = call->as_Call()->proj_out(TypeFunc::Control);
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  c = c->unique_ctrl_out();
  assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
  Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
  assert(iff->is_If(), "expect test");
  if (!is_shenandoah_marking_if(igvn, iff)) {
    c = c->unique_ctrl_out();
    assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?");
    iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0);
    assert(is_shenandoah_marking_if(igvn, iff), "expect marking test");
  }
  Node* cmpx = iff->in(1)->in(1);
  igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ));
  igvn->rehash_node_delayed(call);
  call->del_req(call->req()-1);
}

void ShenandoahBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
  if (node->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(node)) {
    for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) {
      Node* use = node->fast_out(i);
      worklist.push(use);
    }
  }
}

void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
  for (uint i = 0; i < useful.size(); i++) {
    Node* n = useful.at(i);
    if (n->Opcode() == Op_AddP && ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(n)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Compile::current()->record_for_igvn(n->fast_out(i));
      }
    }
  }
  for (int i = state()->iu_barriers_count() - 1; i >= 0; i--) {
    ShenandoahIUBarrierNode* n = state()->iu_barrier(i);
    if (!useful.member(n)) {
      state()->remove_iu_barrier(n);
    }
  }
  for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
    if (!useful.member(n)) {
      state()->remove_load_reference_barrier(n);
    }
  }
}

void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}

void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new(comp_arena) ShenandoahBarrierSetC2State(comp_arena);
}

ShenandoahBarrierSetC2State* ShenandoahBarrierSetC2::state() const {
  return reinterpret_cast<ShenandoahBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const { return false; }

#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(bool post_parse) const {
  if (ShenandoahVerifyOptoBarriers && !post_parse) {
    ShenandoahBarrierC2Support::verify(Compile::current()->root());
  }
}
#endif

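// Barrier-aware Ideal transformations. Pre-barrier calls drop their extra
// address argument once that address is used only by such calls; CmpP against
// NULL looks through barriers, since a barrier never turns a null into a
// non-null or vice versa; and a heap-stable test is folded away when an
// identical test dominates it.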
Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const {
  if (is_shenandoah_wb_pre_call(n)) {
    uint cnt = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type()->domain()->cnt();
    if (n->req() > cnt) {
      Node* addp = n->in(cnt);
      if (has_only_shenandoah_wb_pre_uses(addp)) {
        n->del_req(cnt);
        if (can_reshape) {
          phase->is_IterGVN()->_worklist.push(addp);
        }
        return n;
      }
    }
  }
  if (n->Opcode() == Op_CmpP) {
    Node* in1 = n->in(1);
    Node* in2 = n->in(2);
    if (in1->bottom_type() == TypePtr::NULL_PTR) {
      in2 = step_over_gc_barrier(in2);
    }
    if (in2->bottom_type() == TypePtr::NULL_PTR) {
      in1 = step_over_gc_barrier(in1);
    }
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (in1 != n->in(1)) {
      if (igvn != NULL) {
        n->set_req_X(1, in1, igvn);
      } else {
        n->set_req(1, in1);
      }
      assert(in2 == n->in(2), "only one change");
      return n;
    }
    if (in2 != n->in(2)) {
      if (igvn != NULL) {
        n->set_req_X(2, in2, igvn);
      } else {
        n->set_req(2, in2);
      }
      return n;
    }
  } else if (can_reshape &&
             n->Opcode() == Op_If &&
             ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
             n->in(0) != NULL) {
    Node* dom = n->in(0);
    Node* prev_dom = n;
    int op = n->Opcode();
    int dist = 16;
    // Search up the dominator tree for another heap stable test
    while (dom->Opcode() != op    ||  // Not same opcode?
           !ShenandoahBarrierC2Support::is_heap_stable_test(dom) ||  // Not same input 1?
           prev_dom->in(0) != dom) {  // One path of test does not dominate?
      if (dist < 0) return NULL;

      dist--;
      prev_dom = dom;
      dom = IfNode::up_one_dom(dom);
      if (!dom) return NULL;
    }

    // Check that we did not follow a loop back to ourselves
    if (n == dom) {
      return NULL;
    }

    return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN());
  }
  return NULL;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!is_shenandoah_wb_pre_call(u)) {
      return false;
    }
  }
  return n->outcnt() > 0;
}

Node* ShenandoahBarrierSetC2::arraycopy_load_reference_barrier(PhaseGVN *phase, Node* v) {
  if (ShenandoahLoadRefBarrier) {
    return phase->transform(new ShenandoahLoadReferenceBarrierNode(NULL, v));
  }
  if (ShenandoahIUBarrier) {
    return phase->transform(new ShenandoahIUBarrierNode(v));
  }
  return v;
}