/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

// Implementation of StubAssembler

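// call_RT performs a call into the VM runtime: it passes Rthread in R0 (the
// overloads below expect any additional arguments in R1..R3), records the
// last Java frame so the stack remains walkable across the call, and checks
// the thread for a pending exception on return. If an exception is pending,
// control is diverted to the forward_exception stub instead of returning to
// the stub's caller. The returned offset identifies the call site for oop
// map bookkeeping.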
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  mov(R0, Rthread);

  int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);

  call(entry);
  if (call_offset == -1) { // PC not saved
    call_offset = offset();
  }
  reset_last_Java_frame(Rtemp);

  assert(frame_size() != no_frame_size, "frame must be fixed");
  if (_stub_id != Runtime1::forward_exception_id) {
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
  }

  if (oop_result1->is_valid()) {
    assert_different_registers(oop_result1, R3, Rtemp);
    get_vm_result(oop_result1, Rtemp);
  }
  if (metadata_result->is_valid()) {
    assert_different_registers(metadata_result, R3, Rtemp);
    get_vm_result_2(metadata_result, Rtemp);
  }

  // Check for pending exception
  // unpack_with_exception_in_tls path is taken through
  // Runtime1::exception_handler_for_pc
  if (_stub_id != Runtime1::forward_exception_id) {
    assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
    cmp(R3, 0);
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
  } else {
#ifdef ASSERT
    // Should not have pending exception in forward_exception stub
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
    cmp(R3, 0);
    breakpoint(ne);
#endif // ASSERT
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  if (arg1 != R1) {
    mov(R1, arg1);
  }
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  assert(arg1 == R1 && arg2 == R2, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  assert(arg1 == R1 && arg2 == R2 && arg3 == R3, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 3);
}

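// Typical usage of call_RT inside a stub (a sketch of the recurring pattern
// in generate_code_for below, not an additional entry point):
//
//   OopMap* map = save_live_registers(sasm);
//   int call_offset = __ call_RT(R0 /* result */, noreg, entry_point, R1);
//   oop_maps = new OopMapSet();
//   oop_maps->add_gc_map(call_offset, map);   // GC info for the call site
//   restore_live_registers_except_R0(sasm);   // keep the result in R0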

#define __ sasm->

// TODO: ARM - does this duplicate RegisterSaver in SharedRuntime?

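// Frame layout established by save_live_registers, as described by the
// RegisterLayout enum below (word offsets from SP after the frame is set
// up):
//
//   [0 .. fpu_save_size)       saved VFP registers, or an equal-sized gap
//   [R0_offset .. R12_offset]  saved R0-R12, with the slot of the register
//                              used as FP skipped (it is saved separately)
//   [FP_offset]                saved FP
//   [LR_offset]                saved LR (return address)
//
// arg1_offset and arg2_offset are byte offsets of the first two words the
// caller pushed immediately above this save area.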
enum RegisterLayout {
  fpu_save_size = pd_nof_fpu_regs_reg_alloc,
#ifndef __SOFTFP__
  D0_offset = 0,
#endif
  R0_offset = fpu_save_size,
  R1_offset,
  R2_offset,
  R3_offset,
  R4_offset,
  R5_offset,
  R6_offset,
#if (FP_REG_NUM != 7)
  R7_offset,
#endif
  R8_offset,
  R9_offset,
  R10_offset,
#if (FP_REG_NUM != 11)
  R11_offset,
#endif
  R12_offset,
  FP_offset,
  LR_offset,
  reg_save_size,
  arg1_offset = reg_save_size * wordSize,
  arg2_offset = (reg_save_size + 1) * wordSize
};


static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  sasm->set_frame_size(reg_save_size /* in words */);

  // Record saved value locations in an OopMap.
  // Locations are offsets from sp after runtime call.
  OopMap* map = new OopMap(VMRegImpl::slots_per_word * reg_save_size, 0);

  int j = 0;
  for (int i = R0_offset; i < R10_offset; i++) {
    if (j == FP_REG_NUM) {
      // skip the FP register, saved below
      j++;
    }
    map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
    j++;
  }
  assert(j == R10->encoding(), "must be");
#if (FP_REG_NUM != 11)
  // add R11, if not saved as FP
  map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
#endif
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset), LR->as_VMReg());

  if (save_fpu_registers) {
    for (int i = 0; i < fpu_save_size; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
    }
  }

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  __ block_comment("save_live_registers");
  sasm->set_frame_size(reg_save_size /* in words */);

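  // The layout of the pushes below must mirror the RegisterLayout enum
  // above: FP and LR at the highest addresses, the general registers below
  // them, and the FPU save area (or an equal-sized gap) at the lowest, so
  // that the offsets recorded by generate_oop_map are correct.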
  __ push(RegisterSet(FP) | RegisterSet(LR));
  __ push(RegisterSet(R0, R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (save_fpu_registers) {
    __ fstmdbd(SP, FloatRegisterSet(D0, fpu_save_size / 2), writeback);
  } else {
    __ sub(SP, SP, fpu_save_size * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}


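// Tears down the frame built by save_live_registers. Flags:
//   restore_R0            - if false, the saved R0 slot is skipped so a
//                           result already in R0 survives the restore
//   restore_FP_LR         - pop the saved FP and LR (required if do_return)
//   do_return             - pop the saved LR value directly into PC,
//                           returning to the stub's caller
//   restore_fpu_registers - reload the VFP registers saved on entry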
static void restore_live_registers(StubAssembler* sasm,
                                   bool restore_R0,
                                   bool restore_FP_LR,
                                   bool do_return,
                                   bool restore_fpu_registers = HaveVFP) {
  __ block_comment("restore_live_registers");

  if (restore_fpu_registers) {
    __ fldmiad(SP, FloatRegisterSet(D0, fpu_save_size / 2), writeback);
    if (!restore_R0) {
      __ add(SP, SP, (R1_offset - fpu_save_size) * wordSize);
    }
  } else {
    __ add(SP, SP, (restore_R0 ? fpu_save_size : R1_offset) * wordSize);
  }
  __ pop(RegisterSet((restore_R0 ? R0 : R1), R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (restore_FP_LR) {
    __ pop(RegisterSet(FP) | RegisterSet(do_return ? PC : LR));
  } else {
    assert(!do_return, "return without restoring FP/LR");
  }
}


static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, false, true, true, restore_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, true, restore_fpu_registers);
}

static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, false, false, restore_fpu_registers);
}

static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, false, restore_fpu_registers);
}

void StubAssembler::save_live_registers() {
  ::save_live_registers(this);
}

void StubAssembler::restore_live_registers_without_return() {
  ::restore_live_registers_without_return(this);
}

void Runtime1::initialize_pd() {
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (has_argument) {
    __ ldr(R1, Address(SP, arg1_offset));
    __ ldr(R2, Address(SP, arg2_offset));
    call_offset = __ call_RT(noreg, noreg, target, R1, R2);
  } else {
    call_offset = __ call_RT(noreg, noreg, target);
  }

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DEBUG_ONLY(STOP("generate_exception_throw");)  // Should not reach here
  return oop_maps;
}


static void restore_sp_for_method_handle(StubAssembler* sasm) {
  // Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
  __ cmp(Rtemp, 0);
  __ mov(SP, Rmh_SP_save, ne);
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  bool save_fpu_registers = false;

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;

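  // forward_exception_id enters with the exception pending in thread-local
  // storage; the handle_exception* ids enter with the exception already in
  // Rexception_obj and the throwing pc in Rexception_pc.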
  switch (id) {
  case forward_exception_id: {
    save_fpu_registers = HaveVFP;
    oop_map = generate_oop_map(sasm);
    __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
    __ ldr(Rexception_pc, Address(SP, LR_offset * wordSize));
    Register zero = __ zero_register(Rtemp);
    __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
    break;
  }
  case handle_exception_id:
    save_fpu_registers = HaveVFP;
    // fall-through
  case handle_exception_nofpu_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, save_fpu_registers);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except the exception oop (R4/Rexception_obj)
    // and the exception pc (R5/Rexception_pc) are dead.
    oop_map = save_live_registers(sasm);  // TODO it's not required to save all registers
    break;
  default:  ShouldNotReachHere();
  }

  __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));

  __ str(Rexception_pc, Address(SP, LR_offset * wordSize)); // patch throwing pc into return address

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Exception handler found
  __ str(R0, Address(SP, LR_offset * wordSize)); // patch the return address

  // Restore the registers that were saved at the beginning, remove
  // frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, save_fpu_registers);
    // Note: restore_live_registers includes the jump to LR (patched to R0)
    break;
  case handle_exception_from_callee_id:
    restore_live_registers_without_return(sasm); // must not jump immediately to handler
    restore_sp_for_method_handle(sasm);
    __ ret();
    break;
  default:  ShouldNotReachHere();
  }

  DEBUG_ONLY(STOP("generate_handle_exception");)  // Should not reach here

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler* sasm) {
  // FP is no longer used to find the frame start: on entry, remove_frame()
  // has already been called, restoring FP and LR.

  // Search for the exception handler address of the caller (using the return address)
  __ mov(c_rarg0, Rthread);
  __ mov(Rexception_pc, LR);
  __ mov(c_rarg1, LR);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);

  // Exception oop should still be in Rexception_obj and pc in Rexception_pc
  // Jump to handler
  __ verify_not_null_oop(Rexception_obj);

  // JSR292 extension
  restore_sp_for_method_handle(sasm);

  __ jump(R0);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine, returns non-zero if the nmethod was deoptimized.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

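  // R0 holds the result of the patching call: non-zero means the nmethod
  // was deoptimized, so we must not simply return into the caller's frame.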
  __ cmp_32(R0, 0);

  restore_live_registers_except_FP_LR(sasm);
  __ pop(RegisterSet(FP) | RegisterSet(PC), eq);

  // Deoptimization needed
  // TODO: ARM - no need to restore FP & LR because unpack_with_reexecution() stores them back
  __ pop(RegisterSet(FP) | RegisterSet(LR));

  __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);

  DEBUG_ONLY(STOP("generate_patching");)  // Should not reach here
  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  OopMapSet* oop_maps = NULL;
  bool save_fpu_registers = HaveVFP;

  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // does not return on ARM
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        const Register result = R0;
        const Register klass  = R1;

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc() && id != new_instance_id) {
          Label slow_case, slow_case_no_pop;

          // Make sure the class is fully initialized
          if (id == fast_new_instance_init_check_id) {
            __ ldrb(result, Address(klass, InstanceKlass::init_state_offset()));
            __ cmp(result, InstanceKlass::fully_initialized);
            __ b(slow_case_no_pop, ne);
          }

          // Free some temporary registers
          const Register obj_size = R4;
          const Register tmp1     = R5;
          const Register tmp2     = LR;
          const Register obj_end  = Rtemp;

          __ raw_push(R4, R5, LR);

          __ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset()));
          __ eden_allocate(result, obj_end, tmp1, tmp2, obj_size, slow_case);        // initializes result and obj_end
          __ initialize_object(result, obj_end, klass, noreg /* len */, tmp1, tmp2,
                               instanceOopDesc::header_size() * HeapWordSize, -1,
                               /* is_tlab_allocated */ false);
          __ raw_pop_and_ret(R4, R5);

          __ bind(slow_case);
          __ raw_pop(R4, R5, LR);

          __ bind(slow_case_no_pop);
        }

        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case counter_overflow_id:
      {
        OopMap* oop_map = save_live_registers(sasm);
        __ ldr(R1, Address(SP, arg1_offset));
        __ ldr(R2, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

        const Register result = R0;
        const Register klass  = R1;
        const Register length = R2;

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_case, slow_case_no_pop;

          __ cmp_32(length, C1_MacroAssembler::max_array_allocation_length);
          __ b(slow_case_no_pop, hs);

          // Free some temporary registers
          const Register arr_size = R4;
          const Register tmp1     = R5;
          const Register tmp2     = LR;
          const Register tmp3     = Rtemp;
          const Register obj_end  = tmp3;

          __ raw_push(R4, R5, LR);

          // Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size)
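          // For arrays the low byte of layout_helper is log2(element size) and
          // the byte at _lh_header_size_shift is the header size in bytes.
          // Shifting by tmp1 below is safe because ARM register-specified
          // shifts use only the bottom byte of the shift register.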
          __ ldr_u32(tmp1, Address(klass, Klass::layout_helper_offset()));
          __ mov(arr_size, MinObjAlignmentInBytesMask);
          __ and_32(tmp2, tmp1, (unsigned int)(Klass::_lh_header_size_mask << Klass::_lh_header_size_shift));

          __ add(arr_size, arr_size, AsmOperand(length, lsl, tmp1));

          __ add(arr_size, arr_size, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift));
          __ align_reg(arr_size, arr_size, MinObjAlignmentInBytes);

          // eden_allocate destroys tmp2, so reload header_size after allocation
          // eden_allocate initializes result and obj_end
          __ eden_allocate(result, obj_end, tmp1, tmp2, arr_size, slow_case);
          __ ldrb(tmp2, Address(klass, in_bytes(Klass::layout_helper_offset()) +
                                       Klass::_lh_header_size_shift / BitsPerByte));
          __ initialize_object(result, obj_end, klass, length, tmp1, tmp2, tmp2, -1, /* is_tlab_allocated */ false);
          __ raw_pop_and_ret(R4, R5);

          __ bind(slow_case);
          __ raw_pop(R4, R5, LR);
          __ bind(slow_case_no_pop);
        }

        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case new_multi_array_id:
      {
        __ set_info("new_multi_array", dont_gc_arguments);

        // R0: klass
        // R2: rank
        // SP: address of 1st dimension
        const Register result = R0;
        OopMap* map = save_live_registers(sasm);

        __ mov(R1, R0);
        __ add(R3, SP, arg1_offset);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_multi_array), R1, R2, R3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Do not call runtime if JVM_ACC_HAS_FINALIZER flag is not set
        __ load_klass(Rtemp, R0);
        __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));

        __ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
        __ bx(LR, eq);

        // Call VM
        OopMap* map = save_live_registers(sasm);
        oop_maps = new OopMapSet();
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R0);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // (in)  R0 - sub, destroyed
        // (in)  R1 - super, not changed
        // (out) R0 - result: 1 if check passed, 0 otherwise
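        // Linear scan of the secondary supers array; on a hit the super is
        // also stored into secondary_super_cache so that the next check of
        // the same pair can take the fast path.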
        __ raw_push(R2, R3, LR);

        // Load the array of secondary supers
        __ ldr(R2, Address(R0, Klass::secondary_supers_offset()));
        // Length goes to R3
        __ ldr_s32(R3, Address(R2, Array<Klass*>::length_offset_in_bytes()));
        __ add(R2, R2, Array<Klass*>::base_offset_in_bytes());

        Label loop, miss;
        __ bind(loop);
        __ cbz(R3, miss);
        __ ldr(LR, Address(R2, wordSize, post_indexed));
        __ sub(R3, R3, 1);
        __ cmp(LR, R1);
        __ b(loop, ne);

        // We get here if a matching entry was found; cache it for next time
        __ str(R1, Address(R0, Klass::secondary_super_cache_offset()));
        __ mov(R0, 1);
        __ raw_pop_and_ret(R2, R3);

        // No matching entry found - return false
        __ bind(miss);
        __ mov(R0, 0);
        __ raw_pop_and_ret(R2, R3);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);
        const Register obj  = R1;
        const Register lock = R2;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(obj, Address(SP, arg1_offset));
        __ ldr(lock, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), obj, lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        __ set_info("monitorexit", dont_gc_arguments);
        const Register lock = R1;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(lock, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        const Register trap_request = R1;
        __ ldr(trap_request, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers_without_return(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, noreg);
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers_without_return(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        STOP("unimplemented entry");
      }
      break;
  }
  return oop_maps;
}

#undef __

#ifdef __SOFTFP__
const char *Runtime1::pd_name_for_address(address entry) {

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
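// FUNCTION_CASE(entry, f) returns the stringized name "f" when entry is the
// address of f; entries that match none of the cases below fall through to
// the empty-string result at the end.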

  FUNCTION_CASE(entry, __aeabi_fadd_glibc);
  FUNCTION_CASE(entry, __aeabi_fmul);
  FUNCTION_CASE(entry, __aeabi_fsub_glibc);
  FUNCTION_CASE(entry, __aeabi_fdiv);

  // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
  FUNCTION_CASE(entry, __aeabi_dadd_glibc);
  FUNCTION_CASE(entry, __aeabi_dmul);
  FUNCTION_CASE(entry, __aeabi_dsub_glibc);
  FUNCTION_CASE(entry, __aeabi_ddiv);

  FUNCTION_CASE(entry, __aeabi_f2d);
  FUNCTION_CASE(entry, __aeabi_d2f);
  FUNCTION_CASE(entry, __aeabi_i2f);
  FUNCTION_CASE(entry, __aeabi_i2d);
  FUNCTION_CASE(entry, __aeabi_f2iz);

  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);

  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpgt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpgt);

  FUNCTION_CASE(entry, SharedRuntime::fneg);
  FUNCTION_CASE(entry, SharedRuntime::dneg);

  FUNCTION_CASE(entry, __aeabi_fcmpeq);
  FUNCTION_CASE(entry, __aeabi_fcmplt);
  FUNCTION_CASE(entry, __aeabi_fcmple);
  FUNCTION_CASE(entry, __aeabi_fcmpge);
  FUNCTION_CASE(entry, __aeabi_fcmpgt);

  FUNCTION_CASE(entry, __aeabi_dcmpeq);
  FUNCTION_CASE(entry, __aeabi_dcmplt);
  FUNCTION_CASE(entry, __aeabi_dcmple);
  FUNCTION_CASE(entry, __aeabi_dcmpge);
  FUNCTION_CASE(entry, __aeabi_dcmpgt);
#undef FUNCTION_CASE
  return "";
}
#else  // __SOFTFP__
const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}
#endif // __SOFTFP__