/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"

#define __ ce->masm()->
#undef  CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }
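
// Note: ce->emit_call_c() can bail out the compilation (e.g., when the constant
// section overflows while materializing the call target), which is why every such
// call below is immediately followed by CHECK_BAILOUT() before call info is recorded.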

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
    ce->emit_call_c(a);
    CHECK_BAILOUT();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // Pass the array index in Z_R1_scratch which is not managed by linear scan.
  if (_index->is_cpu_register()) {
    __ lgr_if_needed(Z_R1_scratch, _index->as_register());
  } else {
    __ load_const_optimized(Z_R1_scratch, _index->as_jint());
  }

  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register());
  }
  ce->emit_call_c(Runtime1::entry_for (stub_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  bool success = __ set_metadata_constant(m, Z_R1_scratch);
  if (!success) {
    ce->compilation()->bailout("const section overflow");
    return;
  }
  ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
  ce->store_parameter(_bci, 0);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

// Note: pass object in Z_R1_scratch
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_obj->is_valid()) {
    __ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub
  }
  address a = Runtime1::entry_for (_stub);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  address a = Runtime1::entry_for (_stub_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_type_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
  __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
  ce->emit_call_c(Runtime1::entry_for (enter_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Move address of the BasicObjectLock into Z_R1_scratch.
  if (_compute_lock) {
    // Lock_reg was destroyed by fast unlocking attempt => recompute it.
    ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
  } else {
    __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
  }
  // Note: non-blocking leaf routine => no call info needed.
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  ce->emit_call_c(Runtime1::entry_for (exit_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime.
// - in runtime: Preserve all registers (especially objects, i.e., source and destination object).
// - in runtime: After initializing class, restore original code, reexecute instruction.
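//
// Resulting stub layout, sketched from PatchingStub::emit_code() below
// (informational only, not a contract):
//   <copy of the original instruction(s)>  - executed once the class is initialized
//   <being_initialized check>              - load_mirror_id only
//   <4-byte patch record>                  - offsets the runtime needs to find the code to patch
//   <load_const + BASR>                    - call into the Runtime1 patching stub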

int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);
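// The value above is the (negative) distance from the return pc of the patching
// call (BASR) back to patch_info_pc: 12 bytes for the unoptimized load_const plus
// 2 bytes for the BASR. PatchingStub::emit_code() asserts that it stays in sync.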

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#ifndef PRODUCT
  const char* bc;
  switch (_id) {
  case access_field_id: bc = "patch site (access_field)"; break;
  case load_klass_id: bc = "patch site (load_klass)"; break;
  case load_mirror_id: bc = "patch site (load_mirror)"; break;
  case load_appendix_id: bc = "patch site (load_appendix)"; break;
  default: bc = "patch site (unknown patch id)"; break;
  }
  masm->block_comment(bc);
#endif

  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes, but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
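  //
  // Byte layout of the patch record emitted just below (a summary of the
  // emit_int8 calls; informational only):
  //   byte 0: opcode byte of the 'A' instruction (for the disassembler)
  //   byte 1: offset back to the being_initialized entry
  //   byte 2: number of bytes to skip when resuming at the patched site
  //   byte 3: number of bytes that were copied out of the patched site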
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes of patch record data.
  __ emit_int8((int8_t)(A_ZOPC>>24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for (Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for (Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
  __ load_const(Z_R1_scratch, target); // Must not optimize in order to keep _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // Slow case: call to native.
  __ bind(_entry);
  __ lgr_if_needed(Z_ARG1, src()->as_register());
  __ lgr_if_needed(Z_ARG2, src_pos()->as_register());
  __ lgr_if_needed(Z_ARG3, dst()->as_register());
  __ lgr_if_needed(Z_ARG4, dst_pos()->as_register());
  __ lgr_if_needed(Z_ARG5, length()->as_register());

  // Must align call sites, otherwise they can't be updated atomically on MP hardware.
  ce->align_call(lir_static_call);

  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned");

  ce->emit_static_call_stub();

  // Prepend each BRASL with a nop.
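  // (The leading nop presumably keeps the BRASL's displacement at the alignment
  // asserted above, so that the call target can later be patched atomically; cf.
  // NativeCall::call_far_pcrelative_displacement_offset.)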
  __ relocate(relocInfo::static_call_type);
  __ z_nop();
  __ z_brasl(Z_R14, SharedRuntime::get_resolve_static_call_stub());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_slowcase_cnt);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
#endif

  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

#undef __