/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/runtime.hpp"
#include "utilities/macros.hpp"

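// Default barrier set implementation for C2, the server compiler: the
// accesses built here are plain loads, stores and atomic operations.
// GC-specific barrier sets subclass BarrierSetC2 and wrap these hooks
// with their own read/write barriers.
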
// By default this is a no-op.
void BarrierSetC2::resolve_address(C2Access& access) const { }

void* C2Access::barrier_set_state() const {
  return _kit->barrier_set_state();
}

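// Decide whether this access has to be bracketed by MemBarCPUOrder barriers:
// atomic (read-write) accesses always are, and anonymous (Unsafe) accesses
// are unless a unique alias category can be determined for the address.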
bool C2Access::needs_cpu_membar() const {
  bool mismatched = (_decorators & C2_MISMATCHED) != 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0;
  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  bool in_heap = (_decorators & IN_HEAP) != 0;

  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  bool is_atomic = is_read && is_write;

  if (is_atomic) {
    // Atomics always need to be wrapped in CPU membars
    return true;
  }

  if (anonymous) {
    // We will need memory barriers unless we can determine a unique
    // alias category for this reference.  (Note:  If for some reason
    // the barriers get omitted and the unsafe reference begins to "pollute"
    // the alias analysis of the rest of the graph, either Compile::can_alias
    // or Compile::must_alias will throw a diagnostic assert.)
    if (!in_heap || !is_unordered || (mismatched && !_addr.type()->isa_aryptr())) {
      return true;
    }
  }

  return false;
}

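// Emit the raw store once the address has been resolved. Double values are
// rounded first; the memory ordering is derived from the access decorators.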
Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;

  bool in_native = (decorators & IN_NATIVE) != 0;
  assert(!in_native, "not supported yet");

  if (access.type() == T_DOUBLE) {
    Node* new_val = kit->dstore_rounding(val.node());
    val.set_node(new_val);
  }

  MemNode::MemOrd mo = access.mem_node_mo();

  Node* store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
                                     access.addr().type(), mo, requires_atomic_access, unaligned, mismatched, unsafe);
  access.set_raw_access(store);
  return store;
}

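// Emit the raw load once the address has been resolved. Loads from native
// (off-heap) memory take the simpler make_load path without an alias type;
// unsafe loads may additionally be control dependent and pinned
// (see C2Access::fixup_decorators).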
Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  GraphKit* kit = access.kit();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
  bool unaligned = (decorators & C2_UNALIGNED) != 0;
  bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
  bool pinned = (decorators & C2_PINNED_LOAD) != 0;
  bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;

  bool in_native = (decorators & IN_NATIVE) != 0;

  MemNode::MemOrd mo = access.mem_node_mo();
  LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
  Node* control = control_dependent ? kit->control() : NULL;

  Node* load;
  if (in_native) {
    load = kit->make_load(control, adr, val_type, access.type(), mo);
  } else {
    load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
                          dep, requires_atomic_access, unaligned, mismatched, unsafe);
  }

  access.set_raw_access(load);

  return load;
}

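// RAII helper that brackets an access with the required memory barriers:
// the constructor emits the leading membar before the access is generated,
// and the destructor emits the trailing membar and pairs it with the
// leading one.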
class C2AccessFence: public StackObj {
  C2Access& _access;
  Node* _leading_membar;

public:
  C2AccessFence(C2Access& access) :
    _access(access), _leading_membar(NULL) {
    GraphKit* kit = access.kit();
    DecoratorSet decorators = access.decorators();

    bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
    bool is_read = (decorators & C2_READ_ACCESS) != 0;
    bool is_atomic = is_read && is_write;

    bool is_volatile = (decorators & MO_SEQ_CST) != 0;
    bool is_release = (decorators & MO_RELEASE) != 0;

    if (is_atomic) {
      // Memory-model-wise, a LoadStore acts like a little synchronized
      // block, so needs barriers on each side.  These don't translate
      // into actual barriers on most machines, but we still need the rest
      // of the compiler to respect ordering.
      if (is_release) {
        _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
      } else if (is_volatile) {
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
          _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
        } else {
          _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
        }
      }
    } else if (is_write) {
      // If reference is volatile, prevent following memory ops from
      // floating down past the volatile write.  Also prevents commoning
      // another volatile read.
      if (is_volatile || is_release) {
        _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
      }
    } else {
      // Memory barrier to prevent normal and 'unsafe' accesses from
      // bypassing each other.  Happens after null checks, so the
      // exception paths do not take memory state from the memory barrier,
      // so there's no problem making a strong assert about mixing users
      // of safe & unsafe memory.
      if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
        _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
      }
    }

    if (access.needs_cpu_membar()) {
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    }

    if (is_atomic) {
      // 4984716: MemBars must be inserted before this
      //          memory node in order to avoid a false
      //          dependency which will confuse the scheduler.
      access.set_memory();
    }
  }

  ~C2AccessFence() {
    GraphKit* kit = _access.kit();
    DecoratorSet decorators = _access.decorators();

    bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
    bool is_read = (decorators & C2_READ_ACCESS) != 0;
    bool is_atomic = is_read && is_write;

    bool is_volatile = (decorators & MO_SEQ_CST) != 0;
    bool is_acquire = (decorators & MO_ACQUIRE) != 0;

    // If reference is volatile, prevent following volatile ops from
    // floating up before the volatile access.
    if (_access.needs_cpu_membar()) {
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    }

    if (is_atomic) {
      if (is_acquire || is_volatile) {
        Node* n = _access.raw_access();
        Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
        if (_leading_membar != NULL) {
          MemBarNode::set_load_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
        }
      }
    } else if (is_write) {
      // If not multiple copy atomic, we do the MemBarVolatile before the load.
      if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
        Node* n = _access.raw_access();
        Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar
        if (_leading_membar != NULL) {
          MemBarNode::set_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
        }
      }
    } else {
      if (is_volatile || is_acquire) {
        Node* n = _access.raw_access();
        assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
        Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
        mb->as_MemBar()->set_trailing_load();
      }
    }
  }
};

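// Public entry points: wrap the access in a C2AccessFence, let the barrier
// set resolve the address, then dispatch to the *_resolved implementation.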
Node* BarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return store_at_resolved(access, val);
}

Node* BarrierSetC2::load_at(C2Access& access, const Type* val_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return load_at_resolved(access, val_type);
}

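// Translate the MO_* decorators of this access into the memory ordering
// constraint carried by the C2 memory node.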
MemNode::MemOrd C2Access::mem_node_mo() const {
  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  if ((_decorators & MO_SEQ_CST) != 0) {
    if (is_write && is_read) {
      // For atomic operations
      return MemNode::seqcst;
    } else if (is_write) {
      return MemNode::release;
    } else {
      assert(is_read, "what else?");
      return MemNode::acquire;
    }
  } else if ((_decorators & MO_RELEASE) != 0) {
    return MemNode::release;
  } else if ((_decorators & MO_ACQUIRE) != 0) {
    return MemNode::acquire;
  } else if (is_write) {
    // Volatile fields need releasing stores.
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    // Conservatively release stores of object references.
    return StoreNode::release_if_reference(_type);
  } else {
    return MemNode::unordered;
  }
}

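// Normalize the decorators: apply the default memory ordering, honor
// AlwaysAtomicAccesses, and pin anonymous (Unsafe) loads unless the access
// is known to fall within the bounds of a non-null instance.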
void C2Access::fixup_decorators() {
  bool default_mo = (_decorators & MO_DECORATOR_MASK) == 0;
  bool is_unordered = (_decorators & MO_UNORDERED) != 0 || default_mo;
  bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;

  bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;

  if (AlwaysAtomicAccesses && is_unordered) {
    _decorators &= ~MO_DECORATOR_MASK; // clear the MO bits
    _decorators |= MO_RELAXED; // Force the MO_RELAXED decorator with AlwaysAtomicAccess
  }

  _decorators = AccessInternal::decorator_fixup(_decorators);

  if (is_read && !is_write && anonymous) {
    // To be valid, unsafe loads may depend on other conditions than
    // the one that guards them: pin the Load node
    _decorators |= C2_CONTROL_DEPENDENT_LOAD;
    _decorators |= C2_PINNED_LOAD;
    const TypePtr* adr_type = _addr.type();
    Node* adr = _addr.node();
    if (!needs_cpu_membar() && adr_type->isa_instptr()) {
      assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
      intptr_t offset = Type::OffsetBot;
      AddPNode::Ideal_base_and_offset(adr, &_kit->gvn(), offset);
      if (offset >= 0) {
        int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
        if (offset < s) {
          // Guaranteed to be a valid access, no need to pin it
          _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
          _decorators ^= C2_PINNED_LOAD;
        }
      }
    }
  }
}

//--------------------------- atomic operations---------------------------------

static void pin_atomic_op(C2AtomicAccess& access) {
  if (!access.needs_pinning()) {
    return;
  }
  // SCMemProjNodes represent the memory state of a LoadStore. Their
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  GraphKit* kit = access.kit();
  Node* load_store = access.raw_access();
  assert(load_store != NULL, "must pin atomic op");
  Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
  kit->set_memory(proj, access.alias_idx());
}

void C2AtomicAccess::set_memory() {
  Node *mem = _kit->memory(_alias_idx);
  _memory = mem;
}

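// Compare-and-exchange returning the old value. On 64-bit VMs, oop accesses
// through narrow-oop addresses encode the operands and decode the result;
// the LoadStore node is kept alive via pin_atomic_op.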
Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                   Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  MemNode::MemOrd mo = access.mem_node_mo();
  Node* mem = access.memory();

  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();

  Node* load_store = NULL;

  if (access.is_oop()) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
    } else
#endif
    {
      load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
    }
  } else {
    switch (access.type()) {
      case T_BYTE: {
        load_store = kit->gvn().transform(new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_SHORT: {
        load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_INT: {
        load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      case T_LONG: {
        load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

#ifdef _LP64
  if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
    return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return load_store;
}

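// Compare-and-swap returning a boolean success flag; handles both the strong
// and the weak (C2_WEAK_CMPXCHG) variants.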
Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  DecoratorSet decorators = access.decorators();
  MemNode::MemOrd mo = access.mem_node_mo();
  Node* mem = access.memory();
  bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
  Node* load_store = NULL;
  Node* adr = access.addr().node();

  if (access.is_oop()) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
      }
    } else
#endif
    {
      if (is_weak_cas) {
        load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      } else {
        load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
      }
    }
  } else {
    switch(access.type()) {
      case T_BYTE: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_SHORT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_INT: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      case T_LONG: {
        if (is_weak_cas) {
          load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        } else {
          load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

  return load_store;
}

Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();
  Node* mem = access.memory();
  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();
  Node* load_store = NULL;

  if (access.is_oop()) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
      load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
    } else
#endif
    {
      load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
    }
  } else {
    switch (access.type()) {
      case T_BYTE:
        load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_SHORT:
        load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_INT:
        load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
        break;
      case T_LONG:
        load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

#ifdef _LP64
  if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
    return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
  }
#endif

  return load_store;
}

Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  Node* load_store = NULL;
  GraphKit* kit = access.kit();
  Node* adr = access.addr().node();
  const TypePtr* adr_type = access.addr().type();
  Node* mem = access.memory();

  switch(access.type()) {
    case T_BYTE:
      load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_SHORT:
      load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_INT:
      load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
      break;
    case T_LONG:
      load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
      break;
    default:
      ShouldNotReachHere();
  }

  access.set_raw_access(load_store);
  pin_atomic_op(access);

  return load_store;
}

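// Public atomic entry points: same fence/resolve/dispatch pattern as
// store_at and load_at above.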
Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
                                          Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
}

Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
                                           Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* BarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_xchg_at_resolved(access, new_val, value_type);
}

Node* BarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  resolve_address(access);
  return atomic_add_at_resolved(access, new_val, value_type);
}

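// Emit a 'clone basic' ArrayCopy that copies the payload of an object or
// array as 8-byte words, starting at an 8-byte aligned offset past the
// header.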
void BarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
  // Exclude the header but include the array length to copy by 8-byte words.
  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
                            instanceOopDesc::base_offset_in_bytes();
  // base_off:
  // 8  - 32-bit VM
  // 12 - 64-bit VM, compressed klass
  // 16 - 64-bit VM, normal klass
  if (base_off % BytesPerLong != 0) {
    assert(UseCompressedClassPointers, "");
    if (is_array) {
      // Exclude the length to copy by 8-byte words.
      base_off += sizeof(int);
    } else {
      // Include the klass to copy by 8-byte words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
  }
  Node* src_base = kit->basic_plus_adr(src, base_off);
  Node* dst_base = kit->basic_plus_adr(dst, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = kit->gvn().transform(new SubXNode(countx, kit->MakeConX(base_off)));
  countx = kit->gvn().transform(new URShiftXNode(countx, kit->intcon(LogBytesPerLong)));

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;

  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
  ac->set_clonebasic();
  Node* n = kit->gvn().transform(ac);
  if (n == ac) {
    kit->set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
  } else {
    kit->set_all_memory(n);
  }
}

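// On 64-bit VMs the word-sized length below is a jlong, which takes two
// positions in the call signature; XTOP supplies top() for the unused half.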
#define XTOP LP64_ONLY(COMMA phase->top())

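// Expand a basic clone ArrayCopy into a direct leaf call to the raw
// word-copying arraycopy stub.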
void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* mem = ac->in(TypeFunc::Memory);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);

  assert(src_offset == NULL && dest_offset == NULL, "for clone offsets should be null");

  const char* copyfunc_name = "arraycopy";
  address     copyfunc_addr =
          phase->basictype2arraycopy(T_LONG, NULL, NULL,
                              true, copyfunc_name, true);

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();

  Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, src, dest, length XTOP);
  phase->transform_later(call);

  phase->igvn().replace_node(ac, call);
}