/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_sparc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"

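// HotSpot convention: '__' abbreviates ce->masm()-> so that each stub body
// below reads as straight SPARC assembly.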
#define __ ce->masm()->

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

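// Note on the delayed() calls throughout this file: every SPARC branch and
// call has a one-instruction delay slot that executes before the transfer of
// control takes effect. delayed()->nop() leaves the slot empty, while
// delayed()->mov_or_nop(s, d) does useful work in the slot: it emits a
// register move when s and d differ and a nop otherwise.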
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(a, relocInfo::runtime_call_type);
    __ delayed()->nop();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

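  // Pass the failing index to the runtime stub in G4; the range-check path
  // additionally passes the array oop in G5.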
  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ mov(_array->as_pointer_register(), G5);
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
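  // Pass the bci in G4 and the Method* in G5 before calling into the
  // counter_overflow stub.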
  __ set(_bci, G4);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ set_metadata_constant(m, G5);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4); // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
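  // Call the allocation stub with the klass in G5 (loaded in the delay slot);
  // the new object comes back in O0 and is moved to the result register in
  // the delay slot of the branch back to _continuation.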
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
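  // The object to lock goes in G4; the BasicLock address follows in G5 via
  // the call's delay slot.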
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }

  __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler*) {
  // Patch sites on SPARC are always properly aligned.
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, metadata_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // produce a copy of the load mirror instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
    __ ld_ptr(G3, in_bytes(InstanceKlass::init_thread_offset()), G3);
    __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);

    // load_klass patches may execute the patched code before it's
    // copied back into place, so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

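  // Patch-record layout (one word, to keep it instruction aligned):
  //   byte 0: padding
  //   byte 1: distance back to the being_initialized entry
  //   byte 2: number of bytes of stub-local code to skip
  //   byte 3: number of bytes of original code that were copied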
  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_trap_request, G4);
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
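  // Marshal the five arraycopy arguments into the SPARC outgoing argument
  // registers O0..O4 before the static call below.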
  __ mov(src()->as_register(), O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(), O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(), O4);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

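  // In non-product builds, count the slow-path entries: load the global
  // counter, increment it, and store it back.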
#ifndef PRODUCT
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

#undef __