/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"


#define __ ce->masm()->
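// In the emit_code methods below, __ is shorthand for ce->masm()->, the
// usual HotSpot convention for emitting instructions through the
// LIR_Assembler's MacroAssembler.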

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(rscratch1, m);
  ce->store_parameter(rscratch1, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

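// RangeCheckStub comes in two flavors: the two-operand form is used for
// array bounds checks and throws ArrayIndexOutOfBoundsException (the array
// is passed along so the runtime can build a detailed exception message);
// the one-operand form throws IndexOutOfBoundsException.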
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

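// The throw stubs expect their arguments in fixed scratch registers: the
// failing index in rscratch1 and (for the range check case) the array in
// rscratch2, rather than going through store_parameter.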
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    assert(_array != NULL, "sanity");
    __ mov(rscratch2, _array->as_pointer_register());
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
  __ blr(lr);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

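// DivByZeroStub: AArch64 integer division by zero does not trap in
// hardware (SDIV/UDIV simply produce zero), so compiled code typically
// reaches this stub through an explicit zero check; the stub throws
// ArithmeticException through Runtime1.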
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


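// Runtime1 calling convention for the new_instance family: the klass is
// passed in r3 and the allocated object comes back in r0, as the asserts
// below document.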
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs

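// MonitorEnterStub: slow path for monitorenter, taken when the inline
// fast-path lock attempt fails.  The object and its BasicObjectLock are
// handed to the runtime through the stub parameter area.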
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
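  // The _nofpu stub variants skip saving/restoring FPU registers; they
  // are sufficient when the compiled method contains no FPU code.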
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
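  // Tail-call the runtime stub: load the return address into lr by hand,
  // so that the stub's final ret lands directly at _continuation.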
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

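// Note: the AArch64 port does not use C1 runtime patching (see the assert
// in PatchingStub::emit_code below), so the scheme described above is
// effectively dormant here.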
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


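// DeoptimizeStub: passes the encoded trap request (deopt reason and
// action) to the Runtime1 deoptimize stub, which unwinds the compiled
// frame and continues in the interpreter; control never returns here.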
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


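// ImplicitNullCheckStub: the implicit exception table entry maps the
// offset of the faulting memory access to this stub's code offset, so the
// signal handler can redirect execution here when the null check traps.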
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
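  // rscratch1 carries the outgoing object, so hand far_call rscratch2 as
  // its temporary register instead.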
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


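// ArrayCopyStub: slow path for an intrinsified arraycopy.  It makes an
// ordinary Java call to the arraycopy method through the
// resolve-static-call stub, using the standard Java calling convention.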
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5, true);
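  // java_calling_convention has now assigned each of the five arguments
  // either a register or a stack slot (as a VMRegPair); register-assigned
  // arguments are already in place, the rest are stored below.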

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // store any stack-assigned arguments; register-assigned ones are
  // already in the right place
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str(r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == NULL) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
  __ incrementw(Address(rscratch2));
#endif

  __ b(_continuation);
}

#undef __