/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), NULL, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }

ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
                                   Node* src, Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* length,
                                   bool alloc_tightly_coupled,
                                   bool has_negative_length_guard,
                                   Node* src_klass, Node* dest_klass,
                                   Node* src_length, Node* dest_length) {

  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled, has_negative_length_guard);
  kit->set_predefined_input_for_runtime_call(ac);

  ac->init_req(ArrayCopyNode::Src, src);
  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
  ac->init_req(ArrayCopyNode::Dest, dest);
  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
  ac->init_req(ArrayCopyNode::Length, length);
  ac->init_req(ArrayCopyNode::SrcLen, src_length);
  ac->init_req(ArrayCopyNode::DestLen, dest_length);
  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);

  if (may_throw) {
    ac->set_req(TypeFunc::I_O , kit->i_o());
    kit->add_safepoint_edges(ac, false);
  }

  return ac;
}

void ArrayCopyNode::connect_outputs(GraphKit* kit, bool deoptimize_on_exception) {
  kit->set_all_memory_call(this, true);
  kit->set_control(kit->gvn().transform(new ProjNode(this,TypeFunc::Control)));
  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true, deoptimize_on_exception);
  kit->set_all_memory_call(this);
}
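
// Illustrative usage sketch (not taken from any particular caller): code that
// expands to an arraycopy would typically build the node, mark its kind, and
// wire it into the graph roughly as follows, assuming `kit`, the argument
// nodes and the two guard flags already exist:
//
//   ArrayCopyNode* ac = ArrayCopyNode::make(kit, true /* may_throw */,
//                                           src, src_offset, dest, dest_offset, length,
//                                           alloc_tightly_coupled, has_negative_length_guard,
//                                           src_klass, dest_klass, src_length, dest_length);
//   // ... set the copy kind on ac (arraycopy/clone/CopyOf/CopyOfRange) ...
//   Node* n = kit->gvn().transform(ac);
//   if (n == ac) {
//     ac->connect_outputs(kit, false /* deoptimize_on_exception */);
//   }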

#ifndef PRODUCT
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};

void ArrayCopyNode::dump_spec(outputStream *st) const {
  CallNode::dump_spec(st);
  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}
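
// Example: for a validated arraycopy whose Length input is an int constant of 8,
// the helper above returns 8; for a basic clone the length is a word-sized
// constant (hence find_intptr_t_con()); a non-constant or dead (TOP) length
// yields -1.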

int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  Node* src = in(ArrayCopyNode::Src);
  const Type* src_type = phase->type(src);

  if (is_clonebasic()) {
    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway, so it is easier to simply
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert (ary_src != NULL, "not an array or instance?");
      // clone passes a length as a rounded number of longs. If we're
      // cloning an array we'll do it element by element. If the
      // length input to ArrayCopyNode is constant, the length of the
      // input array must be constant too.

      assert((get_length_if_constant(phase) == -1) != ary_src->size()->is_con() ||
             phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");

      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}
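
// Illustrative values of the count computed above: cloning an instance with
// 3 non-static, non-injected fields yields 3 (one load/store pair per field);
// cloning an int[7] whose size is a known constant yields 7; a copy whose
// length is unknown here yields -1, which keeps the macro node from being
// expanded into loads/stores.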

Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
  return res;
}

void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt) {
  DecoratorSet decorators = C2_WRITE_ACCESS | IN_HEAP | C2_ARRAY_COPY;
  if (is_alloc_tightly_coupled()) {
    decorators |= C2_TIGHTLY_COUPLED_ALLOC;
  }
  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  bs->store_at(access, value);
  ctl = access.ctl();
}
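
// Both helpers above route the accesses through BarrierSetC2 so that any
// GC-specific barriers are emitted exactly as for regular loads and stores;
// the C2_ARRAY_COPY decorator additionally marks them as coming from an
// arraycopy expansion (which, e.g., a collector's barrier implementation may
// treat specially).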


Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return NULL;
  }

  Node* base_src = in(ArrayCopyNode::Src);
  Node* base_dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(base_src);

  MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();

  const TypeInstPtr* inst_src = src_type->isa_instptr();

  if (inst_src == NULL) {
    return NULL;
  }

  if (!inst_src->klass_is_exact()) {
    ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
    assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy");
    phase->C->dependencies()->assert_leaf_type(ik);
  }

  ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  for (int i = 0; i < count; i++) {
    ciField* field = ik->nonstatic_field_at(i);
    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
    Node* off = phase->MakeConX(field->offset());
    Node* next_src = phase->transform(new AddPNode(base_src,base_src,off));
    Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off));
    BasicType bt = field->layout_type();

    const Type *type;
    if (bt == T_OBJECT) {
      if (!field->type()->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
      } else {
        ciType* field_klass = field->type();
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    Node* v = load(bs, phase, ctl, mem, next_src, adr_type, type, bt);
    store(bs, phase, ctl, mem, next_dest, adr_type, v, type, bt);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    // Return NodeSentinel to indicate that the transform failed
    return NodeSentinel;
  }

  return mem;
}
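
// Resulting shape, illustratively: for an instance with two fields at offsets
// 12 and 16 (hypothetical layout), the loop above emits roughly
//   v0 = load(src + 12); store(dest + 12, v0);
//   v1 = load(src + 16); store(dest + 16, v1);
// all on the same control, with the effects accumulated into the returned
// MergeMem.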

bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                                       Node*& adr_src,
                                       Node*& base_src,
                                       Node*& adr_dest,
                                       Node*& base_dest,
                                       BasicType& copy_type,
                                       const Type*& value_type,
                                       bool& disjoint_bases) {
  base_src = in(ArrayCopyNode::Src);
  base_dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(base_src);
  const TypeAryPtr* ary_src = src_type->isa_aryptr();

  Node* src_offset = in(ArrayCopyNode::SrcPos);
  Node* dest_offset = in(ArrayCopyNode::DestPos);

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(base_dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();

    // A newly allocated object is guaranteed not to overlap with the source object
    disjoint_bases = is_alloc_tightly_coupled();

    if (ary_src  == NULL || ary_src->klass()  == NULL ||
        ary_dest == NULL || ary_dest->klass() == NULL) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem  = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    BasicType dest_elem = ary_dest->klass()->as_array_klass()->element_type()->basic_type();
    if (is_reference_type(src_elem))   src_elem  = T_OBJECT;
    if (is_reference_type(dest_elem))  dest_elem = T_OBJECT;

    if (src_elem != dest_elem || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, BarrierSetC2::Optimization)) {
      // It's an object array copy but we can't emit the card marking
      // that is needed
      return false;
    }

    value_type = ary_src->elem();

    uint shift  = exact_log2(type2aelembytes(dest_elem));
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (src_offset->is_top() || dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }

    Node* src_scale  = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src          = phase->transform(new AddPNode(base_src, base_src, src_scale));
    adr_dest         = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));

    adr_src          = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest         = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));

    copy_type = dest_elem;
  } else {
    assert(ary_src != NULL, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;

    adr_src  = phase->transform(new AddPNode(base_src, base_src, src_offset));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));

    BasicType elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    if (is_reference_type(elem)) {
      elem = T_OBJECT;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(true, elem, true, BarrierSetC2::Optimization)) {
      return false;
    }

    // The address is offset to an aligned address where a raw copy would start.
    // If the clone copy is decomposed into loads/stores, the address is adjusted
    // to point at where the array elements start.
    const Type* toff = phase->type(src_offset);
    int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }
    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}
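
// The element addresses computed above for an arraycopy are, spelled out:
//   adr_src  = base_src  + header + (src_offset  << shift)
//   adr_dest = base_dest + header + (dest_offset << shift)
// e.g. for an int[] (shift == 2) copied starting at index 3 with a 16-byte
// array header (an illustrative value; the real one comes from
// arrayOopDesc::base_offset_in_bytes), adr_src = base_src + 16 + 12.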

const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
  if (atp == TypeOopPtr::BOTTOM) {
    atp = phase->type(n)->isa_ptr();
  }
  // adjust atp to be the correct array element address type
  return atp->add_offset(Type::OffsetBot);
}
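
// Example: if the caller passes TypeOopPtr::BOTTOM, the node's own pointer type
// is used instead, and add_offset(Type::OffsetBot) widens it to "any offset
// within this array", so every element touched by the copy aliases with the
// resulting slice.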

void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
  Node* ctl = in(TypeFunc::Control);
  if (!disjoint_bases && count > 1) {
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != NULL && dest_offset != NULL, "should be");
    Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
    Node *bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
    IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    phase->transform(iff);

    forward_ctl = phase->transform(new IfFalseNode(iff));
    backward_ctl = phase->transform(new IfTrueNode(iff));
  } else {
    forward_ctl = ctl;
  }
}
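
// Why the direction test matters: when the two bases may be the same array and
// more than one element is copied, e.g. System.arraycopy(a, 0, a, 1, 3) with
// a = {1, 2, 3, 4} must produce {1, 1, 2, 3}. A forward copy would overwrite
// elements before reading them, so the src_offset < dest_offset case takes the
// backward control path above instead.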

Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
                                        bool can_reshape,
                                        Node*& forward_ctl,
                                        Node* mem,
                                        const TypePtr* atp_src,
                                        const TypePtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!forward_ctl->is_top()) {
    // copy forward
    MergeMemNode* mm = MergeMemNode::make(mem);

    if (count > 0) {
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* v = load(bs, phase, forward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
      for (int i = 1; i < count; i++) {
        Node* off  = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
        Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
        v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, forward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return mm;
  }
  return phase->C->top();
}

Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
                                         bool can_reshape,
                                         Node*& backward_ctl,
                                         Node* mem,
                                         const TypePtr* atp_src,
                                         const TypePtr* atp_dest,
                                         Node* adr_src,
                                         Node* base_src,
                                         Node* adr_dest,
                                         Node* base_dest,
                                         BasicType copy_type,
                                         const Type* value_type,
                                         int count) {
  if (!backward_ctl->is_top()) {
    // copy backward
    MergeMemNode* mm = MergeMemNode::make(mem);

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays");

    if (count > 0) {
      for (int i = count-1; i >= 1; i--) {
        Node* off  = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
        Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
        Node* v = load(bs, phase, backward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
      Node* v = load(bs, phase, backward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, backward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return phase->transform(mm);
  }
  return phase->C->top();
}

bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // Replace the fallthrough projections of the ArrayCopyNode with the
      // new memory and control, and with the input I/O.
      CallProjections callprojs;
      extract_projections(&callprojs, true, false);

      if (callprojs.fallthrough_ioproj != NULL) {
        igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs.fallthrough_memproj != NULL) {
        igvn->replace_node(callprojs.fallthrough_memproj, mem);
      }
      if (callprojs.fallthrough_catchproj != NULL) {
        igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace the current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}


Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return NULL;
  }

  // See if this is a small array copy that we can inline as loads/stores.
  // Here we can only handle:
  // - an arraycopy whose arguments were all validated before and that
  //   doesn't need card marking
  // - a clone that doesn't need card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return NULL;
  }

  assert(in(TypeFunc::Control) != NULL &&
         in(TypeFunc::Memory) != NULL &&
         in(ArrayCopyNode::Src) != NULL &&
         in(ArrayCopyNode::Dest) != NULL &&
         in(ArrayCopyNode::Length) != NULL &&
         in(ArrayCopyNode::SrcPos) != NULL &&
         in(ArrayCopyNode::DestPos) != NULL, "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != NULL && in(ArrayCopyNode::DestPos)->is_top())) {
    return NULL;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return NULL;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != NULL) {
    return (mem == NodeSentinel) ? NULL : mem;
  }

  Node* adr_src = NULL;
  Node* base_src = NULL;
  Node* adr_dest = NULL;
  Node* base_dest = NULL;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = NULL;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    return NULL;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const TypePtr* atp_src = get_address_type(phase, _src_type, src);
  const TypePtr* atp_dest = get_address_type(phase, _dest_type, dest);
  Node* in_mem = in(TypeFunc::Memory);

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  Node* backward_ctl = phase->C->top();
  Node* forward_ctl = phase->C->top();
  array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);

  Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
                                         in_mem,
                                         atp_src, atp_dest,
                                         adr_src, base_src, adr_dest, base_dest,
                                         copy_type, value_type, count);

  Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
                                           in_mem,
                                           atp_src, atp_dest,
                                           adr_src, base_src, adr_dest, base_dest,
                                           copy_type, value_type, count);

  Node* ctl = NULL;
  if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
    ctl = new RegionNode(3);
    ctl->init_req(1, forward_ctl);
    ctl->init_req(2, backward_ctl);
    ctl = phase->transform(ctl);
    MergeMemNode* forward_mm = forward_mem->as_MergeMem();
    MergeMemNode* backward_mm = backward_mem->as_MergeMem();
    for (MergeMemStream mms(forward_mm, backward_mm); mms.next_non_empty2(); ) {
      if (mms.memory() != mms.memory2()) {
        Node* phi = new PhiNode(ctl, Type::MEMORY, phase->C->get_adr_type(mms.alias_idx()));
        phi->init_req(1, mms.memory());
        phi->init_req(2, mms.memory2());
        phi = phase->transform(phi);
        mms.set_memory(phi);
      }
    }
    mem = forward_mem;
  } else if (!forward_ctl->is_top()) {
    ctl = forward_ctl;
    mem = forward_mem;
  } else {
    assert(!backward_ctl->is_top(), "no copy?");
    ctl = backward_ctl;
    mem = backward_mem;
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    return NULL;
  }

  return mem;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");
    return t_oop->instance_id() == _dest_type->instance_id();
  }

  return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
}

bool ArrayCopyNode::may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call) {
  if (n != NULL &&
      n->is_Call() &&
      n->as_Call()->may_modify(t_oop, phase) &&
      (n->as_Call()->is_ArrayCopy() || n->as_Call()->is_call_to_arraycopystub())) {
    call = n->as_Call();
    return true;
  }
  return false;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase, ArrayCopyNode*& ac) {

  Node* c = mb->in(0);

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // Step over a G1 GC barrier if we're at, e.g., a clone with ReduceInitialCardMarks off
  c = bs->step_over_gc_barrier(c);

  CallNode* call = NULL;
  guarantee(c != NULL, "step_over_gc_barrier failed, there must be something to step to.");
  if (c->is_Region()) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL) {
        Node* n = c->in(i)->in(0);
        if (may_modify_helper(t_oop, n, phase, call)) {
          ac = call->isa_ArrayCopy();
          assert(c == mb->in(0), "only for clone");
          return true;
        }
      }
    }
  } else if (may_modify_helper(t_oop, c->in(0), phase, call)) {
    ac = call->isa_ArrayCopy();
#ifdef ASSERT
    bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
      static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
    assert(c == mb->in(0) || (ac != NULL && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
#endif
    return true;
  } else if (mb->trailing_partial_array_copy()) {
    return true;
  }

  return false;
}

// Does this array copy modify offsets between offset_lo and offset_hi in the
// destination array?
// If must_modify is false, return true if the copy could write between
// offset_lo and offset_hi.
// If must_modify is true, return true if the copy is guaranteed to write
// between offset_lo and offset_hi.
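// Worked example (with an illustrative layout): for an int[] destination
// (4-byte elements) with a 16-byte header, dest_pos in [2,2] and len in [5,5],
// the copy writes bytes [2*4+16, (2+5)*4+16) = [24, 44), and the checks below
// compare that interval against [offset_lo, offset_hi].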
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransform* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt *len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == NULL || len_t == NULL || ary_t == NULL) {
    return !must_modify;
  }

  BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = type2aelembytes(ary_elem);

  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}

// As an optimization, choose the optimal vector size for a copy length known at compile time.
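// Arithmetic example: for T_INT (4-byte elements) and const_len == 6 the copy is
// 24 bytes, which falls into the (16, 32] bucket, so lane_count = 32 / 4 = 8;
// with an unknown length the default is ArrayCopyPartialInlineSize / 4.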
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {
  int lane_count = ArrayCopyPartialInlineSize/type2aelembytes(type);
  if (const_len > 0) {
    int size_in_bytes = const_len * type2aelembytes(type);
    if (size_in_bytes <= 16)
      lane_count = 16/type2aelembytes(type);
    else if (size_in_bytes > 16 && size_in_bytes <= 32)
      lane_count = 32/type2aelembytes(type);
  }
  return lane_count;
}