/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"

// Implementation of StubAssembler

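// call_RT performs a call into the C1 runtime from stub code: it records the
// current PC as the last Java frame, passes the current thread in Z_ARG1,
// makes the call, and forwards any pending exception. The returned call offset
// is the PC offset callers use to register an oop map for the call site
// (sketch of the typical caller-side pattern, cf. generate_exception_throw()
// further below):
//
//   OopMap* oop_map = save_live_registers(sasm);
//   int call_offset = __ call_RT(noreg, noreg, target);
//   oop_maps->add_gc_map(call_offset, oop_map);
//   restore_live_registers(sasm);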
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
  // it into the frame anchor.
  address pc = get_PC(Z_R1_scratch);
  int call_offset = (int)(pc - addr_at(0));
  set_last_Java_frame(Z_SP, Z_R1_scratch);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = NULL;
  align_call_far_patchable(this->pc());
  return_pc = call_c_opt(entry_point);
  assert(return_pc != NULL, "const section overflow");

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to conditionally jump to forward_exception, but after
    // relocation the branch might no longer reach its target. So we jump
    // around the far branch to make sure the target is always reachable.

    Label ok;
    z_bre(ok); // Bcondequal is the same as bcondZero.

    // Exception pending => forward to exception handler.

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(jlong));
    }
    if (metadata_result->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(jlong));
    }
    if (frame_size() == no_frame_size) {
      // Pop the stub frame.
      pop_frame();
      restore_return_pc();
      load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
      z_br(Z_R1);
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      load_const_optimized(Z_R1, Runtime1::entry_for (Runtime1::forward_exception_id));
      z_br(Z_R1);
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  assert(arg3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg3);
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

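// The __ shorthand below routes assembler calls through the StubAssembler. In
// non-product builds, when Verbose is set, each use of __ also emits a block
// comment carrying the current source file and line, which helps correlate the
// generated stub code with this file when disassembling.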
#define __ sasm->

#ifndef PRODUCT
#undef __
#define __ (Verbose ? (sasm->block_comment(FILE_AND_LINE),sasm):sasm)->
#endif // !PRODUCT

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

static OopMap* generate_oop_map(StubAssembler* sasm) {
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::generate_oop_map(sasm, reg_set);
}

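// The save/restore helpers below push (or pop) a RegisterSaver frame and record
// its size in words via set_frame_size(). Stubs that receive arguments on the
// stack (e.g. counter_overflow_id) use that recorded frame size to address the
// caller's outgoing stack slots above the save area.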
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true, Register return_pc = Z_R14) {
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set =
    save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
  if (!save_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  RegisterSaver::RegisterSet reg_set =
    restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (!restore_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("restore_live_registers_except_r2");
  RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}

void Runtime1::initialize_pd() {
  // Nothing to do.
}

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // Incoming parameters: Z_EXC_OOP and Z_EXC_PC.
  // Keep copies in callee-saved registers during runtime call.
  const Register exception_oop_callee_saved = Z_R11;
  const Register exception_pc_callee_saved  = Z_R12;
  // Other registers used in this stub.
  const Register handler_addr = Z_R4;

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);

  // Check that fields in JavaThread for exception oop and issuing pc are set.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and pc in callee-saved registers to preserve them
  // across the runtime call.
  __ verify_not_null_oop(Z_EXC_OOP);
  __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP);
  __ lgr_if_needed(exception_pc_callee_saved, Z_EXC_PC);

  __ push_frame_abi160(0); // Runtime code needs the z_abi_160.

  // Search the exception handler address of the caller (using the return address).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Z_thread, Z_EXC_PC);
  // Z_RET (Z_R2): exception handler address of the caller.

  __ pop_frame();

  __ invalidate_registers(exception_oop_callee_saved, exception_pc_callee_saved, Z_RET);

  // Move result of call into correct register.
  __ lgr_if_needed(handler_addr, Z_RET);

  // Restore exception oop and pc to Z_EXC_OOP and Z_EXC_PC (required convention of exception handler).
  __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);
  __ lgr_if_needed(Z_EXC_PC, exception_pc_callee_saved);

  // Verify that there is really a valid exception in Z_EXC_OOP.
  __ verify_not_null_oop(Z_EXC_OOP);

  __ z_br(handler_addr); // Jump to exception handler.
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; it returns non-zero if the nmethod got deoptimized.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // Re-execute the patched instruction or, if the nmethod was
  // deoptimized, return to the deoptimization handler entry that will
  // cause re-execution of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  __ z_ltr(Z_RET, Z_RET); // return value == 0

  restore_live_registers(sasm);

  __ z_bcr(Assembler::bcondZero, Z_R14);

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just returned.
  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ load_const_optimized(Z_R1_scratch, dest);
  __ z_br(Z_R1_scratch);

  return oop_maps;
}

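// generate_code_for() emits the body of the stub identified by id. Most cases
// follow the pattern sketched above for call_RT: save the live registers, call
// into the C1 runtime, register the oop map at the returned call offset,
// restore the registers, and either return to the caller or branch to the
// deoptimization or exception handling code.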
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Default value; overwritten for some optimized stubs that are
  // called from methods that do not use the fpu.
  bool save_fpu_registers = true;

  // Stub code and info for the different stubs.
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // will not return
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = Z_R11; // Incoming
        Register obj   = Z_R2;  // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case counter_overflow_id:
      {
        // Arguments :
        //   bci    : stack param 0
        //   method : stack param 1
        //
        Register bci = Z_ARG2, method = Z_ARG3;
        // frame size in bytes
        OopMap* map = save_live_registers(sasm);
        const int frame_size = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
        __ z_lg(bci,    0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(method, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ z_br(Z_R14);
      }
      break;
    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = Z_R13; // Incoming
        Register klass  = Z_R11; // Incoming
        Register obj    = Z_R2;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          NearLabel ok;
          Register t0 = obj;
          __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
          __ z_sra(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3: klass
        // Z_R4: rank
        // Z_R5: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(Z_R2, noreg, CAST_FROM_FN_PTR(address, new_multi_array), Z_R3, Z_R4, Z_R5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        // Z_R2: new multi array
        __ verify_oop(Z_R2);
        __ z_br(Z_R14);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Load the klass and check the has finalizer flag.
        Register klass = Z_ARG2;
        __ load_klass(klass, Z_ARG1);
        __ testbit(Address(klass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
        __ z_bcr(Assembler::bcondAllZero, Z_R14); // Return if bit is not set.

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), Z_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers.
        restore_live_registers(sasm);

        __ z_br(Z_R14);
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;
    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;
    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stubframe since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;
    case throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;
    case throw_class_cast_exception_id:
      { // Z_R1_scratch: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;
    case throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
    case slow_subtype_check_id:
      {
        // Arguments :
        //   sub  : stack param 0
        //   super: stack param 1
        //   raddr: Z_R14, blown by call
        //
        // Result : condition code 0 for match (bcondEqual will be true),
        //          condition code 2 for miss (bcondNotEqual will be true)
        NearLabel miss;
        const Register Rsubklass   = Z_ARG2; // sub
        const Register Rsuperklass = Z_ARG3; // super

        // No args, but tmp registers that are killed.
        const Register Rlength    = Z_ARG4; // cache array length
        const Register Rarray_ptr = Z_ARG5; // Current value from cache array.

        if (UseCompressedOops) {
          assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
        }

        const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
        // Save return pc. This is not necessary, but could be helpful
        // in the case of crashes.
        __ save_return_pc();
        __ push_frame(frame_size);
        // Save registers before changing them.
        int i = 0;
        __ z_stg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");

        // Get sub and super from stack.
        __ z_lg(Rsubklass,   0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);

        __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, NULL, &miss);

        // Match falls through here.
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ clear_reg(Z_R0_scratch); // Zero indicates a match. Set CC 0 (bcondEqual will be true).
        __ z_br(Z_R14);

        __ BIND(miss);
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ load_const_optimized(Z_R0_scratch, 1); // One indicates a miss.
        __ z_ltgr(Z_R0_scratch, Z_R0_scratch);    // Set CC 2 (bcondNotEqual will be true).
        __ z_br(Z_R14);
      }
      break;
    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // Z_R1_scratch : object
        // Z_R13        : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), Z_R1_scratch, Z_R13);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // Z_R1_scratch : lock address
        // Note: really a leaf routine but must setup last java sp
        //   => Use call_RT for now (speed can be improved by
        //      doing last java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), Z_R1_scratch);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ load_const_optimized(Z_R1_scratch, dest);
        __ z_br(Z_R1_scratch);
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // We should set up register map.
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
#if 0
    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there.
        // Preserve rsi and rcx.
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32); // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit.
        __ movptr(rax, result_low_word);
        // Testing of high bits.
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ testl(rax, 0x4100); // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
        // return max_jlong
        __ mov64(rax, CONST64(0x7fffffffffffffff));
        __ jmp(do_return);

        __ bind(return_min_jlong);
        __ mov64(rax, UCONST64(0x8000000000000000));
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
        __ xorptr(rax, rax);

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#endif // TODO

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
        __ z_br(Z_R1_scratch);
      }
      break;

    default:
      {
        __ should_not_reach_here(FILE_AND_LINE, id);
      }
      break;
  }
  return oop_maps;
}

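// generate_handle_exception() is shared by forward_exception_id,
// handle_exception[_nofpu]_id and handle_exception_from_callee_id. The id only
// determines how the register state is captured on entry; the common tail
// stores the exception oop and issuing pc into the JavaThread, calls
// exception_handler_for_pc to compute the handler address, restores the saved
// state, and jumps to that handler.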
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters: Z_EXC_OOP, Z_EXC_PC

  // Save registers if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  Register reg_fp = Z_R1_scratch;

  switch (id) {
    case forward_exception_id: {
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm);

      // Load the pending exception oop into Z_EXC_OOP and clear the field.
      __ z_lg(Z_EXC_OOP, Address(Z_thread, Thread::pending_exception_offset()));
      __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), 8);

      // Different stubs forward their exceptions; they should all have similar frame layouts
      // (a) to find their return address, (b) for a correct oop_map generated above.
      assert(RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers) ==
             RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers_except_r2), "requirement");

      // Load issuing PC (the return address for this stub).
      const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
      __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_abi16(return_pc)));
      DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));)

      // Make sure that the vm_results are cleared (may be unnecessary).
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(oop));
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(Metadata*));
      break;
    }
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Z_EXC_PC);
      break;
    case handle_exception_from_callee_id: {
      // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      __ save_return_pc(Z_EXC_PC);
      const int frame_size_in_bytes = __ push_frame_abi160(0);
      oop_map = new OopMap(frame_size_in_bytes / VMRegImpl::stack_slot_size, 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      break;
    }
    default: ShouldNotReachHere();
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC, reg_fp);
  // Verify that Z_EXC_OOP contains a valid exception.
  __ verify_not_null_oop(Z_EXC_OOP);

  // Check that fields in JavaThread for exception oop and issuing pc
  // are empty before writing to them.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and issuing pc into JavaThread.
  // (Exception handler will load it from here.)
  __ z_stg(Z_EXC_OOP, Address(Z_thread, JavaThread::exception_oop_offset()));
  __ z_stg(Z_EXC_PC,  Address(Z_thread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  { NearLabel ok;
    __ z_cg(Z_EXC_PC, Address(reg_fp, _z_abi16(return_pc)));
    __ branch_optimized(Assembler::bcondEqual, ok);
    __ stop("use throwing pc as return address (has bci & oop map)");
    __ bind(ok);
  }
#endif

  // Compute the exception handler.
  // The exception oop and the throwing pc are read from the fields in JavaThread.
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Z_RET (Z_R2): handler address. It will be the deopt blob if the nmethod was
  // deoptimized while we looked up the handler, regardless of whether the
  // handler existed in the nmethod.

  // Only Z_R2 is valid at this time; all other registers have been destroyed by the runtime call.
  __ invalidate_registers(Z_R2);

  switch(id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // Restore the registers that were saved at the beginning.
      __ z_lgr(Z_R1_scratch, Z_R2);   // Restoring live registers kills Z_R2.
      restore_live_registers(sasm, id != handle_exception_nofpu_id);  // Pops the frame as well.
      __ z_br(Z_R1_scratch);
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ z_br(Z_R2); // Jump to exception handler.
    }
    break;
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}