/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/cpCache.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No arm specific initialization
}

//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
static inline Address iaddress(int n) {
  return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) { return iaddress(n + 1); }
#ifndef AARCH64
static inline Address haddress(int n) { return iaddress(n + 0); }
#endif // !AARCH64

static inline Address faddress(int n) { return iaddress(n); }
static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }
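
// Note on the layout implied by the helpers above: local slots live at
// decreasing addresses as the slot index grows, so a category-2 (long/double)
// value in slots n and n+1 has its high word at iaddress(n) (haddress) and
// its low word at iaddress(n + 1) (laddress); see load_category2_local()
// below for the matching two-word load.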


void TemplateTable::get_local_base_addr(Register r, Register index) {
  __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
}
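
// I.e. r = Rlocals - (index << logStackElementSize): the address of local
// slot 'index' when the index is only known at run time.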

Address TemplateTable::load_iaddress(Register index, Register scratch) {
#ifdef AARCH64
  get_local_base_addr(scratch, index);
  return Address(scratch);
#else
  return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
#endif // AARCH64
}

Address TemplateTable::load_aaddress(Register index, Register scratch) {
  return load_iaddress(index, scratch);
}

Address TemplateTable::load_faddress(Register index, Register scratch) {
#ifdef __SOFTFP__
  return load_iaddress(index, scratch);
#else
  get_local_base_addr(scratch, index);
  return Address(scratch);
#endif // __SOFTFP__
}

Address TemplateTable::load_daddress(Register index, Register scratch) {
  get_local_base_addr(scratch, index);
  return Address(scratch, Interpreter::local_offset_in_bytes(1));
}

// At top of Java expression stack, which may be different from SP.
// It isn't for category 1 objects.
static inline Address at_tos() {
  return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
}
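
// E.g. with "..., array, index, value" on the expression stack (as in
// aastore below), at_tos() addresses value, at_tos_p1() the index, and
// at_tos_p2() the array.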


// 32-bit ARM:
// Loads double/long local into R0_tos_lo/R1_tos_hi with two
// separate ldr instructions (supports nonadjacent values).
// Used for longs in all modes, and for doubles in SOFTFP mode.
//
// AArch64: loads long local into R0_tos.
//
void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
  const Register Rlocal_base = tmp;
  assert_different_registers(Rlocal_index, tmp);

  get_local_base_addr(Rlocal_base, Rlocal_index);
#ifdef AARCH64
  __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
#else
  __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
  __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
#endif // AARCH64
}


// 32-bit ARM:
// Stores R0_tos_lo/R1_tos_hi to double/long local with two
// separate str instructions (supports nonadjacent values).
// Used for longs in all modes, and for doubles in SOFTFP mode.
//
// AArch64: stores R0_tos to long local.
//
void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
  const Register Rlocal_base = tmp;
  assert_different_registers(Rlocal_index, tmp);

  get_local_base_addr(Rlocal_base, Rlocal_index);
#ifdef AARCH64
  __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
#else
  __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
  __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
#endif // AARCH64
}

// Returns address of Java array element using temp register as address base.
Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
  int logElemSize = exact_log2(type2aelembytes(elemType));
  __ add_ptr_scaled_int32(temp, array, index, logElemSize);
  return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
}
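
// For example, for T_INT this emits temp = array + (index << 2) and returns
// Address(temp, arrayOopDesc::base_offset_in_bytes(T_INT)), i.e. the address
// of array->data[index] just past the array header.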

//----------------------------------------------------------------------------------------------------
// Condition conversion
AsmCondition convNegCond(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return ne;
    case TemplateTable::not_equal    : return eq;
    case TemplateTable::less         : return ge;
    case TemplateTable::less_equal   : return gt;
    case TemplateTable::greater      : return le;
    case TemplateTable::greater_equal: return lt;
  }
  ShouldNotReachHere();
  return nv;
}
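
// convNegCond(cc) yields the AsmCondition under which 'cc' does NOT hold,
// e.g. convNegCond(TemplateTable::equal) == ne. This lets branch templates
// fall through on the taken comparison and branch away on its negation with
// a single conditional branch.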


//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

// Store an oop (or NULL) at the address described by obj.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
// Also destroys new_val and obj.base().
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register new_val,
                         Register tmp1,
                         Register tmp2,
                         Register tmp3,
                         bool is_null,
                         DecoratorSet decorators = 0) {

  assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
  if (is_null) {
    __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
  } else {
    __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
  }
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Register dst,
                        Address obj,
                        DecoratorSet decorators = 0) {
  __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Rbcp, offset);
}


// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  assert_different_registers(bc_reg, temp_reg);
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
      __ mov(bc_reg, bc);
      __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ mov(bc_reg, bc);
    }
  }

  if (__ can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ ldrb(temp_reg, at_bcp(0));
    __ cmp(temp_reg, Bytecodes::_breakpoint);
    __ b(L_fast_patch, ne);
    if (bc_reg != R3) {
      __ mov(R3, bc_reg);
    }
    __ mov(R1, Rmethod);
    __ mov(R2, Rbcp);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ ldrb(temp_reg, at_bcp(0));
  __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
  __ b(L_okay, eq);
  __ cmp(temp_reg, bc_reg);
  __ b(L_okay, eq);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ strb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}



void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ mov(R0_tos, 0);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ mov_slow(R0_tos, value);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert((value == 0) || (value == 1), "unexpected long constant");
  __ mov(R0_tos, value);
#ifndef AARCH64
  __ mov(R1_tos_hi, 0);
#endif // !AARCH64
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
#ifdef AARCH64
  switch(value) {
  case 0:  __ fmov_sw(S0_tos, ZR);    break;
  case 1:  __ fmov_s (S0_tos, 0x70);  break;
  case 2:  __ fmov_s (S0_tos, 0x00);  break;
  default: ShouldNotReachHere();      break;
  }
#else
  const int zero = 0;         // 0.0f
  const int one = 0x3f800000; // 1.0f
  const int two = 0x40000000; // 2.0f
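  // (IEEE-754 single precision is sign(1) | exponent(8) | fraction(23):
  // 1.0f has biased exponent 127 and zero fraction, giving 0x3f800000;
  // 2.0f has biased exponent 128, giving 0x40000000.)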

  switch(value) {
  case 0:  __ mov(R0_tos, zero); break;
  case 1:  __ mov(R0_tos, one);  break;
  case 2:  __ mov(R0_tos, two);  break;
  default: ShouldNotReachHere(); break;
  }

#ifndef __SOFTFP__
  __ fmsr(S0_tos, R0_tos);
#endif // !__SOFTFP__
#endif // AARCH64
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
#ifdef AARCH64
  switch(value) {
  case 0:  __ fmov_dx(D0_tos, ZR);   break;
  case 1:  __ fmov_d (D0_tos, 0x70); break;
  default: ShouldNotReachHere();     break;
  }
#else
  const int one_lo = 0;          // low part of 1.0
  const int one_hi = 0x3ff00000; // high part of 1.0
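  // (IEEE-754 double precision: 1.0 has biased exponent 1023 and zero
  // fraction, i.e. the 64-bit pattern 0x3ff0000000000000 split into the
  // hi/lo words above.)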

  if (value == 0) {
#ifdef __SOFTFP__
    __ mov(R0_tos_lo, 0);
    __ mov(R1_tos_hi, 0);
#else
    __ mov(R0_tmp, 0);
    __ fmdrr(D0_tos, R0_tmp, R0_tmp);
#endif // __SOFTFP__
  } else if (value == 1) {
    __ mov(R0_tos_lo, one_lo);
    __ mov_slow(R1_tos_hi, one_hi);
#ifndef __SOFTFP__
    __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
#endif // !__SOFTFP__
  } else {
    ShouldNotReachHere();
  }
#endif // AARCH64
}


void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldrsb(R0_tos, at_bcp(1));
}


void TemplateTable::sipush() {
  transition(vtos, itos);
  __ ldrsb(R0_tmp, at_bcp(1));
  __ ldrb(R1_tmp, at_bcp(2));
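  // combine into R0_tos = (int16_t)((bcp[1] << 8) | bcp[2]); the sign comes
  // from the ldrsb of the high byte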
  __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
}


void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label fastCase, Condy, Done;

  const Register Rindex = R1_tmp;
  const Register Rcpool = R2_tmp;
  const Register Rtags  = R3_tmp;
  const Register RtagType = R3_tmp;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
  } else {
    __ ldrb(Rindex, at_bcp(1));
  }
  __ get_cpool_and_tags(Rcpool, Rtags);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get const type
  __ add(Rtemp, Rtags, tags_offset);
#ifdef AARCH64
  __ add(Rtemp, Rtemp, Rindex);
  __ ldarb(RtagType, Rtemp); // TODO-AARCH64 figure out if barrier is needed here, or control dependency is enough
#else
  __ ldrb(RtagType, Address(Rtemp, Rindex));
  volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
#endif // AARCH64

  // unresolved class - get the resolved class
  __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);

  // unresolved class in error (resolution failed) - call into runtime
  // so that the same error from first resolution attempt is thrown.
#ifdef AARCH64
  __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint
  __ cond_cmp(RtagType, Rtemp, ne);
#else
  __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
#endif // AARCH64

  // resolved class - need to call vm to get java mirror of the class
  __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);

  __ b(fastCase, ne);

  // slow case - call runtime
  __ mov(R1, wide);
  call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
  __ push(atos);
  __ b(Done);

  // int, float, String
  __ bind(fastCase);

  __ cmp(RtagType, JVM_CONSTANT_Integer);
  __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
  __ b(Condy, ne);

  // itos, ftos
  __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldr_u32(R0_tos, Address(Rtemp, base_offset));

  // floats and ints are placed on stack in the same way, so
  // we can use push(itos) to transfer float value without VFP
  __ push(itos);
  __ b(Done);

  __ bind(Condy);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);
  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  assert_different_registers(R0_tos, R2_tmp);
  __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
  __ load_resolved_reference_at_index(R0_tos, R2_tmp);
  __ cbnz(R0_tos, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ mov(R1, (int)bytecode());
  __ call_VM(R0_tos, entry, R1);
  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, that already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;
    Register result = R0;
    Register tmp = R1;
    Register rarg = R2;

    // Stash null_sentinel address to get its value later
    __ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
    __ ldr(tmp, Address(rarg));
    __ cmp(result, tmp);
    __ b(notNull, ne);
    __ mov(result, 0); // NULL object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    __ verify_oop(R0_tos);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  const Register Rtags  = R2_tmp;
  const Register Rindex = R3_tmp;
  const Register Rcpool = R4_tmp;
  const Register Rbase  = R5_tmp;

  __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);

  __ get_cpool_and_tags(Rcpool, Rtags);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));

  // get type from tags
  __ add(Rtemp, Rtags, tags_offset);
  __ ldrb(Rtemp, Address(Rtemp, Rindex));

  Label Condy, exit;
#ifdef __ABI_HARD__
  Label NotDouble;
  __ cmp(Rtemp, JVM_CONSTANT_Double);
  __ b(NotDouble, ne);
  __ ldr_double(D0_tos, Address(Rbase, base_offset));

  __ push(dtos);
  __ b(exit);
  __ bind(NotDouble);
#endif

  __ cmp(Rtemp, JVM_CONSTANT_Long);
  __ b(Condy, ne);
#ifdef AARCH64
  __ ldr(R0_tos, Address(Rbase, base_offset));
#else
  __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
  __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
#endif // AARCH64
  __ push(ltos);
  __ b(exit);

  __ bind(Condy);
  condy_helper(exit);

  __ bind(exit);
}


void TemplateTable::condy_helper(Label& Done)
{
  Register obj   = R0_tmp;
  Register rtmp  = R1_tmp;
  Register flags = R2_tmp;
  Register off   = R3_tmp;

  __ mov(rtmp, (int) bytecode());
  __ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp);
  __ get_vm_result_2(flags, rtmp);

  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);

#ifdef AARCH64
  __ andr(off, off, (unsigned)ConstantPoolCacheEntry::field_index_mask);
#else
  __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
  __ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
#endif

  const Address field(obj, off);

  __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  switch (bytecode()) {
    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
      {
        // tos in (itos, ftos, stos, btos, ctos, ztos)
        Label notIntFloat, notShort, notByte, notChar, notBool;
        __ cmp(flags, itos);
        __ cond_cmp(flags, ftos, ne);
        __ b(notIntFloat, ne);
        __ ldr(R0_tos, field);
        __ push(itos);
        __ b(Done);

        __ bind(notIntFloat);
        __ cmp(flags, stos);
        __ b(notShort, ne);
        __ ldrsh(R0_tos, field);
        __ push(stos);
        __ b(Done);

        __ bind(notShort);
        __ cmp(flags, btos);
        __ b(notByte, ne);
        __ ldrsb(R0_tos, field);
        __ push(btos);
        __ b(Done);

        __ bind(notByte);
        __ cmp(flags, ctos);
        __ b(notChar, ne);
        __ ldrh(R0_tos, field);
        __ push(ctos);
        __ b(Done);

        __ bind(notChar);
        __ cmp(flags, ztos);
        __ b(notBool, ne);
        __ ldrsb(R0_tos, field);
        __ push(ztos);
        __ b(Done);

        __ bind(notBool);
        break;
      }

    case Bytecodes::_ldc2_w:
      {
        Label notLongDouble;
        __ cmp(flags, ltos);
        __ cond_cmp(flags, dtos, ne);
        __ b(notLongDouble, ne);

#ifdef AARCH64
        __ ldr(R0_tos, field);
#else
        __ add(rtmp, obj, wordSize);
        __ ldr(R0_tos_lo, Address(obj, off));
        __ ldr(R1_tos_hi, Address(rtmp, off));
#endif
        __ push(ltos);
        __ b(Done);

        __ bind(notLongDouble);

        break;
      }

    default:
      ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldrb(reg, at_bcp(offset));
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
    Label rewrite, done;
    const Register next_bytecode   = R1_tmp;
    const Register target_bytecode = R2_tmp;

    // get next byte
    __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp(next_bytecode, Bytecodes::_iload);
    __ b(done, eq);

    __ cmp(next_bytecode, Bytecodes::_fast_iload);
    __ mov(target_bytecode, Bytecodes::_fast_iload2);
    __ b(rewrite, eq);

    // if _caload, rewrite to fast_icaload
    __ cmp(next_bytecode, Bytecodes::_caload);
    __ mov(target_bytecode, Bytecodes::_fast_icaload);
    __ b(rewrite, eq);

    // rewrite so iload doesn't check again.
    __ mov(target_bytecode, Bytecodes::_fast_iload);

    // rewrite
    // R2: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
    __ bind(done);
  }

  // Get the local value into tos
  const Register Rlocal_index = R1_tmp;
  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
}


void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  const Register Rlocal_index = R1_tmp;

  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
  __ push(itos);

  locals_index(Rlocal_index, 3);
  local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  const Register Rlocal_index = R1_tmp;

  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
}


void TemplateTable::lload() {
  transition(vtos, ltos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);
  load_category2_local(Rlocal_index, R3_tmp);
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  const Register Rlocal_index = R2_tmp;

  // Get the local value into tos
  locals_index(Rlocal_index);
  Address local = load_faddress(Rlocal_index, Rtemp);
#ifdef __SOFTFP__
  __ ldr(R0_tos, local);
#else
  __ ldr_float(S0_tos, local);
#endif // __SOFTFP__
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);

#ifdef __SOFTFP__
  load_category2_local(Rlocal_index, R3_tmp);
#else
  __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
#endif // __SOFTFP__
}


void TemplateTable::aload() {
  transition(vtos, atos);
  const Register Rlocal_index = R1_tmp;

  locals_index(Rlocal_index);
  Address local = load_aaddress(Rlocal_index, Rtemp);
  __ ldr(R0_tos, local);
}


void TemplateTable::locals_index_wide(Register reg) {
  assert_different_registers(reg, Rtemp);
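  // the wide local index is an unsigned 16-bit big-endian operand:
  // reg = (bcp[2] << 8) | bcp[3]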
  __ ldrb(Rtemp, at_bcp(2));
  __ ldrb(reg, at_bcp(3));
  __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  const Register Rlocal_index = R2_tmp;

  locals_index_wide(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(R0_tos, local);
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  const Register Rlocal_index = R2_tmp;
  const Register Rlocal_base  = R3_tmp;

  locals_index_wide(Rlocal_index);
  load_category2_local(Rlocal_index, R3_tmp);
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  const Register Rlocal_index = R2_tmp;

  locals_index_wide(Rlocal_index);
  Address local = load_faddress(Rlocal_index, Rtemp);
#ifdef __SOFTFP__
  __ ldr(R0_tos, local);
#else
  __ ldr_float(S0_tos, local);
#endif // __SOFTFP__
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  const Register Rlocal_index = R2_tmp;

  locals_index_wide(Rlocal_index);
#ifdef __SOFTFP__
  load_category2_local(Rlocal_index, R3_tmp);
#else
  __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
#endif // __SOFTFP__
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  const Register Rlocal_index = R2_tmp;

  locals_index_wide(Rlocal_index);
  Address local = load_aaddress(Rlocal_index, Rtemp);
  __ ldr(R0_tos, local);
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  assert_different_registers(array, index, Rtemp);
  // check array
  __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
  // check index
  __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
  __ cmp_32(index, Rtemp);
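  // unsigned comparison: the 'hs' (unsigned >=) cases below also catch a
  // negative index, since it compares as a large unsigned value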
  if (index != R4_ArrayIndexOutOfBounds_index) {
    // convention with generate_ArrayIndexOutOfBounds_handler()
    __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
  }
  __ mov(R1, array, hs);
  __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
}


void TemplateTable::laload() {
  transition(itos, ltos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);

#ifdef AARCH64
  __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
#else
  __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
  __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
  __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
#endif // AARCH64
}


void TemplateTable::faload() {
  transition(itos, ftos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);

  Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
#ifdef __SOFTFP__
  __ ldr(R0_tos, addr);
#else
  __ ldr_float(S0_tos, addr);
#endif // __SOFTFP__
}


void TemplateTable::daload() {
  transition(itos, dtos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);

#ifdef __SOFTFP__
  __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
  __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
  __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
#else
  __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
#endif // __SOFTFP__
}


void TemplateTable::aaload() {
  transition(itos, atos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IS_ARRAY);
}


void TemplateTable::baload() {
  transition(itos, itos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
}


void TemplateTable::caload() {
  transition(itos, itos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
}


// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  const Register Rlocal_index = R1_tmp;
  const Register Rarray = R1_tmp;
  const Register Rindex = R4_tmp; // index_check prefers index on R4
  assert_different_registers(Rlocal_index, Rindex);
  assert_different_registers(Rarray, Rindex);

  // load index out of locals
  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ ldr_s32(Rindex, local);

  // get array element
  index_check(Rarray, Rindex);
  __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
}


void TemplateTable::saload() {
  transition(itos, itos);
  const Register Rarray = R1_tmp;
  const Register Rindex = R0_tos;

  index_check(Rarray, Rindex);
  __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ldr_s32(R0_tos, iaddress(n));
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
#ifdef AARCH64
  __ ldr(R0_tos, laddress(n));
#else
  __ ldr(R0_tos_lo, laddress(n));
  __ ldr(R1_tos_hi, haddress(n));
#endif // AARCH64
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
#ifdef __SOFTFP__
  __ ldr(R0_tos, faddress(n));
#else
  __ ldr_float(S0_tos, faddress(n));
#endif // __SOFTFP__
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
#ifdef __SOFTFP__
  __ ldr(R0_tos_lo, laddress(n));
  __ ldr(R1_tos_hi, haddress(n));
#else
  __ ldr_double(D0_tos, daddress(n));
#endif // __SOFTFP__
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ldr(R0_tos, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks if the next bytecode is either _fast_igetfield,
  // _fast_agetfield or _fast_fgetfield and then rewrites the
  // current bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be delayed,
  //       otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable to rewrite
  if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
    Label rewrite, done;
    const Register next_bytecode   = R1_tmp;
    const Register target_bytecode = R2_tmp;

    // get next byte
    __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmp(next_bytecode, Bytecodes::_getfield);
    __ b(done, eq);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
    __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
    __ b(rewrite, eq);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
    __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
    __ b(rewrite, eq);

    // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");

    __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
#ifdef AARCH64
    __ mov(Rtemp, Bytecodes::_fast_faccess_0);
    __ mov(target_bytecode, Bytecodes::_fast_aload_0);
    __ mov(target_bytecode, Rtemp, eq);
#else
    __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
    __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
#endif // AARCH64

    // rewrite
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);

    __ bind(done);
  }

  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ str_32(R0_tos, local);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);
  store_category2_local(Rlocal_index, R3_tmp);
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);
  Address local = load_faddress(Rlocal_index, Rtemp);
#ifdef __SOFTFP__
  __ str(R0_tos, local);
#else
  __ str_float(S0_tos, local);
#endif // __SOFTFP__
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  const Register Rlocal_index = R2_tmp;

  locals_index(Rlocal_index);

#ifdef __SOFTFP__
  store_category2_local(Rlocal_index, R3_tmp);
#else
  __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
#endif // __SOFTFP__
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  const Register Rlocal_index = R1_tmp;

  __ pop_ptr(R0_tos);
  locals_index(Rlocal_index);
  Address local = load_aaddress(Rlocal_index, Rtemp);
  __ str(R0_tos, local);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  const Register Rlocal_index = R2_tmp;

  __ pop_i(R0_tos);
  locals_index_wide(Rlocal_index);
  Address local = load_iaddress(Rlocal_index, Rtemp);
  __ str_32(R0_tos, local);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  const Register Rlocal_index = R2_tmp;
  const Register Rlocal_base  = R3_tmp;

#ifdef AARCH64
  __ pop_l(R0_tos);
#else
  __ pop_l(R0_tos_lo, R1_tos_hi);
#endif // AARCH64

  locals_index_wide(Rlocal_index);
  store_category2_local(Rlocal_index, R3_tmp);
}


void TemplateTable::wide_fstore() {
  wide_istore();
}


void TemplateTable::wide_dstore() {
  wide_lstore();
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  const Register Rlocal_index = R2_tmp;

  __ pop_ptr(R0_tos);
  locals_index_wide(Rlocal_index);
  Address local = load_aaddress(Rlocal_index, Rtemp);
  __ str(R0_tos, local);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // R0_tos: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);
  __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // R0_tos_lo:R1_tos_hi: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);

#ifdef AARCH64
  __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
#else
  __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
  __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
  __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
#endif // AARCH64
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // S0_tos/R0_tos: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);
  Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);

#ifdef __SOFTFP__
  __ str(R0_tos, addr);
#else
  __ str_float(S0_tos, addr);
#endif // __SOFTFP__
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // D0_tos / R0_tos_lo:R1_tos_hi: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);

#ifdef __SOFTFP__
  __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
  __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
  __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
#else
  __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
#endif // __SOFTFP__
}


void TemplateTable::aastore() {
  transition(vtos, vtos);
  Label is_null, throw_array_store, done;

  const Register Raddr_1   = R1_tmp;
  const Register Rvalue_2  = R2_tmp;
  const Register Rarray_3  = R3_tmp;
  const Register Rindex_4  = R4_tmp;  // preferred by index_check_without_pop()
  const Register Rsub_5    = R5_tmp;
  const Register Rsuper_LR = LR_tmp;

  // stack: ..., array, index, value
  __ ldr(Rvalue_2, at_tos());        // Value
  __ ldr_s32(Rindex_4, at_tos_p1()); // Index
  __ ldr(Rarray_3, at_tos_p2());     // Array

  index_check_without_pop(Rarray_3, Rindex_4);

  // Compute the array base
  __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // do array store check - check for NULL value first
  __ cbz(Rvalue_2, is_null);

  // Load subklass
  __ load_klass(Rsub_5, Rvalue_2);
  // Load superklass
  __ load_klass(Rtemp, Rarray_3);
  __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));

  __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
  // Come here on success

  // Store value
  __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));

  // Now store using the appropriate barrier
  do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IS_ARRAY);
  __ b(done);

  __ bind(throw_array_store);

  // Come here on failure of subtype check
  __ profile_typecheck_failed(R0_tmp);

  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Have a NULL in Rvalue_2, store NULL at array[index].
  __ bind(is_null);
  __ profile_null_seen(R0_tmp);

  // Store a NULL
  do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // R0_tos: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(Rtemp, Rarray);
  __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
  Label L_skip;
  __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
  __ b(L_skip, eq);
  __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  const Register Rindex = R4_tmp; // index_check prefers index in R4
  const Register Rarray = R3_tmp;
  // R0_tos: value

  __ pop_i(Rindex);
  index_check(Rarray, Rindex);

  __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
}


void TemplateTable::sastore() {
  assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
           arrayOopDesc::base_offset_in_bytes(T_SHORT),
         "base offsets for char and short should be equal");
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ str_32(R0_tos, iaddress(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
#ifdef AARCH64
  __ str(R0_tos, laddress(n));
#else
  __ str(R0_tos_lo, laddress(n));
  __ str(R1_tos_hi, haddress(n));
#endif // AARCH64
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
#ifdef __SOFTFP__
  __ str(R0_tos, faddress(n));
#else
  __ str_float(S0_tos, faddress(n));
#endif // __SOFTFP__
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
#ifdef __SOFTFP__
  __ str(R0_tos_lo, laddress(n));
  __ str(R1_tos_hi, haddress(n));
#else
  __ str_double(D0_tos, daddress(n));
#endif // __SOFTFP__
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(R0_tos);
  __ str(R0_tos, aaddress(n));
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ add(Rstack_top, Rstack_top, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  __ load_ptr(0, R0_tmp);
  __ push_ptr(R0_tmp);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(0, R0_tmp);  // load b
  __ load_ptr(1, R2_tmp);  // load a
  __ store_ptr(1, R0_tmp); // store b
  __ store_ptr(0, R2_tmp); // store a
  __ push_ptr(R0_tmp);     // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr(0, R0_tmp);  // load c
  __ load_ptr(1, R2_tmp);  // load b
  __ load_ptr(2, R4_tmp);  // load a

  __ push_ptr(R0_tmp);     // push c

  // stack: ..., a, b, c, c
  __ store_ptr(1, R2_tmp); // store b
  __ store_ptr(2, R4_tmp); // store a
  __ store_ptr(3, R0_tmp); // store c
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, R0_tmp); // load a
  __ push_ptr(R0_tmp);    // push a
  __ load_ptr(1, R0_tmp); // load b
  __ push_ptr(R0_tmp);    // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  // stack: ..., a, b, c
  __ load_ptr(0, R4_tmp);  // load c
  __ load_ptr(1, R2_tmp);  // load b
  __ load_ptr(2, R0_tmp);  // load a

  __ push_ptr(R2_tmp);     // push b
  __ push_ptr(R4_tmp);     // push c

  // stack: ..., a, b, c, b, c

  __ store_ptr(2, R0_tmp); // store a
  __ store_ptr(3, R4_tmp); // store c
  __ store_ptr(4, R2_tmp); // store b

  // stack: ..., b, c, a, b, c
}


void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr(0, R0_tmp);  // load d
  __ load_ptr(1, R2_tmp);  // load c
  __ push_ptr(R2_tmp);     // push c
  __ push_ptr(R0_tmp);     // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr(4, R4_tmp);  // load b
  __ store_ptr(4, R0_tmp); // store d in b
  __ store_ptr(2, R4_tmp); // store b in d
  // stack: ..., a, d, c, b, c, d
  __ load_ptr(5, R4_tmp);  // load a
  __ store_ptr(5, R2_tmp); // store c in a
  __ store_ptr(3, R4_tmp); // store a in c
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, R0_tmp);  // load a
  __ load_ptr(0, R2_tmp);  // load b
  __ store_ptr(0, R0_tmp); // store a in b
  __ store_ptr(1, R2_tmp); // store b in a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  const Register arg1 = R1_tmp;
  const Register arg2 = R0_tos;

  __ pop_i(arg1);
  switch (op) {
    case add  : __ add_32 (R0_tos, arg1, arg2); break;
    case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
    case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
    case _and : __ and_32 (R0_tos, arg1, arg2); break;
    case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
    case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
#ifdef AARCH64
    case shl  : __ lslv_w (R0_tos, arg1, arg2); break;
    case shr  : __ asrv_w (R0_tos, arg1, arg2); break;
    case ushr : __ lsrv_w (R0_tos, arg1, arg2); break;
#else
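    // JVM spec: int shift distances use only the low 5 bits of the count,
    // hence the 0x1f mask below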
    case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
    case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
    case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
#endif // AARCH64
    default   : ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
#ifdef AARCH64
  const Register arg1 = R1_tmp;
  const Register arg2 = R0_tos;

  __ pop_l(arg1);
  switch (op) {
    case add  : __ add (R0_tos, arg1, arg2); break;
    case sub  : __ sub (R0_tos, arg1, arg2); break;
    case _and : __ andr(R0_tos, arg1, arg2); break;
    case _or  : __ orr (R0_tos, arg1, arg2); break;
    case _xor : __ eor (R0_tos, arg1, arg2); break;
    default   : ShouldNotReachHere();
  }
#else
  const Register arg1_lo = R2_tmp;
  const Register arg1_hi = R3_tmp;
  const Register arg2_lo = R0_tos_lo;
  const Register arg2_hi = R1_tos_hi;

  __ pop_l(arg1_lo, arg1_hi);
  switch (op) {
    case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
    case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
    case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
    case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
    case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
    default : ShouldNotReachHere();
  }
#endif // AARCH64
}


void TemplateTable::idiv() {
  transition(itos, itos);
#ifdef AARCH64
  const Register divisor  = R0_tos;
  const Register dividend = R1_tmp;

  __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
  __ pop_i(dividend);
  __ sdiv_w(R0_tos, dividend, divisor);
#else
  __ mov(R2, R0_tos);
  __ pop_i(R0);
  // R0 - dividend
  // R2 - divisor
  __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
  // R1 - result
  __ mov(R0_tos, R1);
#endif // AARCH64
}


void TemplateTable::irem() {
  transition(itos, itos);
#ifdef AARCH64
  const Register divisor  = R0_tos;
  const Register dividend = R1_tmp;
  const Register quotient = R2_tmp;

  __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry);
  __ pop_i(dividend);
  __ sdiv_w(quotient, dividend, divisor);
  __ msub_w(R0_tos, divisor, quotient, dividend);
#else
  __ mov(R2, R0_tos);
  __ pop_i(R0);
  // R0 - dividend
  // R2 - divisor
  __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
  // R0 - remainder
#endif // AARCH64
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
#ifdef AARCH64
  const Register arg1 = R0_tos;
  const Register arg2 = R1_tmp;

  __ pop_l(arg2);
  __ mul(R0_tos, arg1, arg2);
#else
  const Register arg1_lo = R0_tos_lo;
  const Register arg1_hi = R1_tos_hi;
  const Register arg2_lo = R2_tmp;
  const Register arg2_hi = R3_tmp;

  __ pop_l(arg2_lo, arg2_hi);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
#endif // AARCH64
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);
#ifdef AARCH64
  const Register divisor  = R0_tos;
  const Register dividend = R1_tmp;

  __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
  __ pop_l(dividend);
  __ sdiv(R0_tos, dividend, divisor);
#else
  const Register x_lo = R2_tmp;
  const Register x_hi = R3_tmp;
  const Register y_lo = R0_tos_lo;
  const Register y_hi = R1_tos_hi;

  __ pop_l(x_lo, x_hi);

  // check if y = 0
  __ orrs(Rtemp, y_lo, y_hi);
  __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
#endif // AARCH64
}


void TemplateTable::lrem() {
  transition(ltos, ltos);
#ifdef AARCH64
  const Register divisor  = R0_tos;
  const Register dividend = R1_tmp;
  const Register quotient = R2_tmp;

  __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry);
  __ pop_l(dividend);
  __ sdiv(quotient, dividend, divisor);
  __ msub(R0_tos, divisor, quotient, dividend);
#else
  const Register x_lo = R2_tmp;
  const Register x_hi = R3_tmp;
  const Register y_lo = R0_tos_lo;
  const Register y_hi = R1_tos_hi;

  __ pop_l(x_lo, x_hi);

  // check if y = 0
  __ orrs(Rtemp, y_lo, y_hi);
  __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
#endif // AARCH64
}


void TemplateTable::lshl() {
  transition(itos, ltos);
#ifdef AARCH64
  const Register val = R1_tmp;
  const Register shift_cnt = R0_tos;
  __ pop_l(val);
  __ lslv(R0_tos, val, shift_cnt);
#else
  const Register shift_cnt = R4_tmp;
  const Register val_lo = R2_tmp;
  const Register val_hi = R3_tmp;

  __ pop_l(val_lo, val_hi);
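  // JVM spec: long shift distances use only the low 6 bits of the count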
1746 __ andr(shift_cnt, R0_tos, 63);
1747 __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1748 #endif // AARCH64
1749 }
1750
1751
lshr()1752 void TemplateTable::lshr() {
1753 transition(itos, ltos);
1754 #ifdef AARCH64
1755 const Register val = R1_tmp;
1756 const Register shift_cnt = R0_tos;
1757 __ pop_l(val);
1758 __ asrv(R0_tos, val, shift_cnt);
1759 #else
1760 const Register shift_cnt = R4_tmp;
1761 const Register val_lo = R2_tmp;
1762 const Register val_hi = R3_tmp;
1763
1764 __ pop_l(val_lo, val_hi);
1765 __ andr(shift_cnt, R0_tos, 63);
1766 __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1767 #endif // AARCH64
1768 }
1769
1770
lushr()1771 void TemplateTable::lushr() {
1772 transition(itos, ltos);
1773 #ifdef AARCH64
1774 const Register val = R1_tmp;
1775 const Register shift_cnt = R0_tos;
1776 __ pop_l(val);
1777 __ lsrv(R0_tos, val, shift_cnt);
1778 #else
1779 const Register shift_cnt = R4_tmp;
1780 const Register val_lo = R2_tmp;
1781 const Register val_hi = R3_tmp;
1782
1783 __ pop_l(val_lo, val_hi);
1784 __ andr(shift_cnt, R0_tos, 63);
1785 __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1786 #endif // AARCH64
1787 }
1788
1789
fop2(Operation op)1790 void TemplateTable::fop2(Operation op) {
1791 transition(ftos, ftos);
1792 #ifdef __SOFTFP__
1793 __ mov(R1, R0_tos);
1794 __ pop_i(R0);
1795 switch (op) {
1796 case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1797 case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1798 case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1799 case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1800 case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1801 default : ShouldNotReachHere();
1802 }
1803 #else
1804 const FloatRegister arg1 = S1_tmp;
1805 const FloatRegister arg2 = S0_tos;
1806
1807 switch (op) {
1808 case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1809 case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1810 case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1811 case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1812 case rem:
1813 #ifndef __ABI_HARD__
1814 __ pop_f(arg1);
1815 __ fmrs(R0, arg1);
1816 __ fmrs(R1, arg2);
1817 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1818 __ fmsr(S0_tos, R0);
1819 #else
1820 __ mov_float(S1_reg, arg2);
1821 __ pop_f(S0);
1822 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1823 #endif // !__ABI_HARD__
1824 break;
1825 default : ShouldNotReachHere();
1826 }
1827 #endif // __SOFTFP__
1828 }
1829
1830
1831 void TemplateTable::dop2(Operation op) {
1832 transition(dtos, dtos);
1833 #ifdef __SOFTFP__
1834 __ mov(R2, R0_tos_lo);
1835 __ mov(R3, R1_tos_hi);
1836 __ pop_l(R0, R1);
1837 switch (op) {
1838 // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1839 case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1840 case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1841 case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1842 case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1843 case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1844 default : ShouldNotReachHere();
1845 }
1846 #else
1847 const FloatRegister arg1 = D1_tmp;
1848 const FloatRegister arg2 = D0_tos;
1849
1850 switch (op) {
1851 case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1852 case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1853 case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1854 case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1855 case rem:
1856 #ifndef __ABI_HARD__
1857 __ pop_d(arg1);
1858 __ fmrrd(R0, R1, arg1);
1859 __ fmrrd(R2, R3, arg2);
1860 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1861 __ fmdrr(D0_tos, R0, R1);
1862 #else
1863 __ mov_double(D1, arg2);
1864 __ pop_d(D0);
1865 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1866 #endif // !__ABI_HARD__
1867 break;
1868 default : ShouldNotReachHere();
1869 }
1870 #endif // __SOFTFP__
1871 }
1872
1873
1874 void TemplateTable::ineg() {
1875 transition(itos, itos);
1876 __ neg_32(R0_tos, R0_tos);
1877 }
1878
1879
1880 void TemplateTable::lneg() {
1881 transition(ltos, ltos);
1882 #ifdef AARCH64
1883 __ neg(R0_tos, R0_tos);
1884 #else
1885 __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1886 __ rsc (R1_tos_hi, R1_tos_hi, 0);
1887 #endif // AARCH64
1888 }
1889
1890
1891 void TemplateTable::fneg() {
1892 transition(ftos, ftos);
1893 #ifdef __SOFTFP__
1894 // Invert sign bit
1895 const int sign_mask = 0x80000000;
1896 __ eor(R0_tos, R0_tos, sign_mask);
1897 #else
1898 __ neg_float(S0_tos, S0_tos);
1899 #endif // __SOFTFP__
1900 }
1901
1902
1903 void TemplateTable::dneg() {
1904 transition(dtos, dtos);
1905 #ifdef __SOFTFP__
1906 // Invert sign bit in the high part of the double
1907 const int sign_mask_hi = 0x80000000;
1908 __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1909 #else
1910 __ neg_double(D0_tos, D0_tos);
1911 #endif // __SOFTFP__
1912 }
1913
1914
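// Bytecode layout for iinc: opcode, index, const. at_bcp(1) holds the
// local variable index and at_bcp(2) the signed 8-bit increment, hence
// the sign-extending ldrsb load below.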
1915 void TemplateTable::iinc() {
1916 transition(vtos, vtos);
1917 const Register Rconst = R2_tmp;
1918 const Register Rlocal_index = R1_tmp;
1919 const Register Rval = R0_tmp;
1920
1921 __ ldrsb(Rconst, at_bcp(2));
1922 locals_index(Rlocal_index);
1923 Address local = load_iaddress(Rlocal_index, Rtemp);
1924 __ ldr_s32(Rval, local);
1925 __ add(Rval, Rval, Rconst);
1926 __ str_32(Rval, local);
1927 }
1928
1929
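// Bytecode layout for wide iinc: wide, iinc, indexbyte1, indexbyte2,
// constbyte1, constbyte2. The 16-bit increment is stored big-endian, so
// the high byte at bcp+4 is loaded with sign extension (ldrsb) and
// combined with the unsigned low byte at bcp+5.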
1930 void TemplateTable::wide_iinc() {
1931 transition(vtos, vtos);
1932 const Register Rconst = R2_tmp;
1933 const Register Rlocal_index = R1_tmp;
1934 const Register Rval = R0_tmp;
1935
1936 // get constant in Rconst
1937 __ ldrsb(R2_tmp, at_bcp(4));
1938 __ ldrb(R3_tmp, at_bcp(5));
1939 __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
1940
1941 locals_index_wide(Rlocal_index);
1942 Address local = load_iaddress(Rlocal_index, Rtemp);
1943 __ ldr_s32(Rval, local);
1944 __ add(Rval, Rval, Rconst);
1945 __ str_32(Rval, local);
1946 }
1947
1948
1949 void TemplateTable::convert() {
1950 // Checking
1951 #ifdef ASSERT
1952 { TosState tos_in = ilgl;
1953 TosState tos_out = ilgl;
1954 switch (bytecode()) {
1955 case Bytecodes::_i2l: // fall through
1956 case Bytecodes::_i2f: // fall through
1957 case Bytecodes::_i2d: // fall through
1958 case Bytecodes::_i2b: // fall through
1959 case Bytecodes::_i2c: // fall through
1960 case Bytecodes::_i2s: tos_in = itos; break;
1961 case Bytecodes::_l2i: // fall through
1962 case Bytecodes::_l2f: // fall through
1963 case Bytecodes::_l2d: tos_in = ltos; break;
1964 case Bytecodes::_f2i: // fall through
1965 case Bytecodes::_f2l: // fall through
1966 case Bytecodes::_f2d: tos_in = ftos; break;
1967 case Bytecodes::_d2i: // fall through
1968 case Bytecodes::_d2l: // fall through
1969 case Bytecodes::_d2f: tos_in = dtos; break;
1970 default : ShouldNotReachHere();
1971 }
1972 switch (bytecode()) {
1973 case Bytecodes::_l2i: // fall through
1974 case Bytecodes::_f2i: // fall through
1975 case Bytecodes::_d2i: // fall through
1976 case Bytecodes::_i2b: // fall through
1977 case Bytecodes::_i2c: // fall through
1978 case Bytecodes::_i2s: tos_out = itos; break;
1979 case Bytecodes::_i2l: // fall through
1980 case Bytecodes::_f2l: // fall through
1981 case Bytecodes::_d2l: tos_out = ltos; break;
1982 case Bytecodes::_i2f: // fall through
1983 case Bytecodes::_l2f: // fall through
1984 case Bytecodes::_d2f: tos_out = ftos; break;
1985 case Bytecodes::_i2d: // fall through
1986 case Bytecodes::_l2d: // fall through
1987 case Bytecodes::_f2d: tos_out = dtos; break;
1988 default : ShouldNotReachHere();
1989 }
1990 transition(tos_in, tos_out);
1991 }
1992 #endif // ASSERT
1993
1994 // Conversion
1995 switch (bytecode()) {
1996 case Bytecodes::_i2l:
1997 #ifdef AARCH64
1998 __ sign_extend(R0_tos, R0_tos, 32);
1999 #else
2000 __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
2001 #endif // AARCH64
2002 break;
2003
2004 case Bytecodes::_i2f:
2005 #ifdef AARCH64
2006 __ scvtf_sw(S0_tos, R0_tos);
2007 #else
2008 #ifdef __SOFTFP__
2009 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
2010 #else
2011 __ fmsr(S0_tmp, R0_tos);
2012 __ fsitos(S0_tos, S0_tmp);
2013 #endif // __SOFTFP__
2014 #endif // AARCH64
2015 break;
2016
2017 case Bytecodes::_i2d:
2018 #ifdef AARCH64
2019 __ scvtf_dw(D0_tos, R0_tos);
2020 #else
2021 #ifdef __SOFTFP__
2022 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
2023 #else
2024 __ fmsr(S0_tmp, R0_tos);
2025 __ fsitod(D0_tos, S0_tmp);
2026 #endif // __SOFTFP__
2027 #endif // AARCH64
2028 break;
2029
2030 case Bytecodes::_i2b:
2031 __ sign_extend(R0_tos, R0_tos, 8);
2032 break;
2033
2034 case Bytecodes::_i2c:
2035 __ zero_extend(R0_tos, R0_tos, 16);
2036 break;
2037
2038 case Bytecodes::_i2s:
2039 __ sign_extend(R0_tos, R0_tos, 16);
2040 break;
2041
2042 case Bytecodes::_l2i:
2043 /* nothing to do */
2044 break;
2045
2046 case Bytecodes::_l2f:
2047 #ifdef AARCH64
2048 __ scvtf_sx(S0_tos, R0_tos);
2049 #else
2050 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
2051 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
2052 __ fmsr(S0_tos, R0);
2053 #endif // !__SOFTFP__ && !__ABI_HARD__
2054 #endif // AARCH64
2055 break;
2056
2057 case Bytecodes::_l2d:
2058 #ifdef AARCH64
2059 __ scvtf_dx(D0_tos, R0_tos);
2060 #else
2061 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
2062 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
2063 __ fmdrr(D0_tos, R0, R1);
2064 #endif // !__SOFTFP__ && !__ABI_HARD__
2065 #endif // AARCH64
2066 break;
2067
2068 case Bytecodes::_f2i:
2069 #ifdef AARCH64
2070 __ fcvtzs_ws(R0_tos, S0_tos);
2071 #else
2072 #ifndef __SOFTFP__
2073 __ ftosizs(S0_tos, S0_tos);
2074 __ fmrs(R0_tos, S0_tos);
2075 #else
2076 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
2077 #endif // !__SOFTFP__
2078 #endif // AARCH64
2079 break;
2080
2081 case Bytecodes::_f2l:
2082 #ifdef AARCH64
2083 __ fcvtzs_xs(R0_tos, S0_tos);
2084 #else
2085 #ifndef __SOFTFP__
2086 __ fmrs(R0_tos, S0_tos);
2087 #endif // !__SOFTFP__
2088 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
2089 #endif // AARCH64
2090 break;
2091
2092 case Bytecodes::_f2d:
2093 #ifdef __SOFTFP__
2094 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
2095 #else
2096 __ convert_f2d(D0_tos, S0_tos);
2097 #endif // __SOFTFP__
2098 break;
2099
2100 case Bytecodes::_d2i:
2101 #ifdef AARCH64
2102 __ fcvtzs_wd(R0_tos, D0_tos);
2103 #else
2104 #ifndef __SOFTFP__
2105 __ ftosizd(Stemp, D0);
2106 __ fmrs(R0, Stemp);
2107 #else
2108 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
2109 #endif // !__SOFTFP__
2110 #endif // AARCH64
2111 break;
2112
2113 case Bytecodes::_d2l:
2114 #ifdef AARCH64
2115 __ fcvtzs_xd(R0_tos, D0_tos);
2116 #else
2117 #ifndef __SOFTFP__
2118 __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
2119 #endif // !__SOFTFP__
2120 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
2121 #endif // AARCH64
2122 break;
2123
2124 case Bytecodes::_d2f:
2125 #ifdef __SOFTFP__
2126 __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
2127 #else
2128 __ convert_d2f(S0_tos, D0_tos);
2129 #endif // __SOFTFP__
2130 break;
2131
2132 default:
2133 ShouldNotReachHere();
2134 }
2135 }
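// Note on the helpers used above: 32-bit ARM has no single instruction
// for float/double to 64-bit integer conversion, so f2l and d2l always
// go through SharedRuntime. Those helpers also provide the Java
// semantics for special values: NaN converts to 0 and out-of-range
// values saturate at the minimum/maximum of the target type.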
2136
2137
2138 void TemplateTable::lcmp() {
2139 transition(ltos, itos);
2140 #ifdef AARCH64
2141 const Register arg1 = R1_tmp;
2142 const Register arg2 = R0_tos;
2143
2144 __ pop_l(arg1);
2145
2146 __ cmp(arg1, arg2);
2147 __ cset(R0_tos, gt); // 1 if '>', else 0
2148 __ csinv(R0_tos, R0_tos, ZR, ge); // previous value if '>=', else -1
2149 #else
2150 const Register arg1_lo = R2_tmp;
2151 const Register arg1_hi = R3_tmp;
2152 const Register arg2_lo = R0_tos_lo;
2153 const Register arg2_hi = R1_tos_hi;
2154 const Register res = R4_tmp;
2155
2156 __ pop_l(arg1_lo, arg1_hi);
2157
2158 // long compare arg1 with arg2
2159 // result is -1/0/+1 if '<'/'='/'>'
2160 Label done;
2161
2162 __ mov (res, 0);
2163 __ cmp (arg1_hi, arg2_hi);
2164 __ mvn (res, 0, lt);
2165 __ mov (res, 1, gt);
2166 __ b(done, ne);
2167 __ cmp (arg1_lo, arg2_lo);
2168 __ mvn (res, 0, lo);
2169 __ mov (res, 1, hi);
2170 __ bind(done);
2171 __ mov (R0_tos, res);
2172 #endif // AARCH64
2173 }
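// Example: comparing 0x0000000100000000 with 0x00000000ffffffff, the
// signed high-word compare already orders the operands (1 > 0), so the
// 'ne' branch above skips the unsigned low-word compare.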
2174
2175
2176 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2177 assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
2178
2179 #ifdef AARCH64
2180 if (is_float) {
2181 transition(ftos, itos);
2182 __ pop_f(S1_tmp);
2183 __ fcmp_s(S1_tmp, S0_tos);
2184 } else {
2185 transition(dtos, itos);
2186 __ pop_d(D1_tmp);
2187 __ fcmp_d(D1_tmp, D0_tos);
2188 }
2189
2190 if (unordered_result < 0) {
2191 __ cset(R0_tos, gt); // 1 if '>', else 0
2192 __ csinv(R0_tos, R0_tos, ZR, ge); // previous value if '>=', else -1
2193 } else {
2194 __ cset(R0_tos, hi); // 1 if '>' or unordered, else 0
2195 __ csinv(R0_tos, R0_tos, ZR, pl); // previous value if '>=' or unordered, else -1
2196 }
2197
2198 #else
2199
2200 #ifdef __SOFTFP__
2201
2202 if (is_float) {
2203 transition(ftos, itos);
2204 const Register Rx = R0;
2205 const Register Ry = R1;
2206
2207 __ mov(Ry, R0_tos);
2208 __ pop_i(Rx);
2209
2210 if (unordered_result == 1) {
2211 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
2212 } else {
2213 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
2214 }
2215
2216 } else {
2217
2218 transition(dtos, itos);
2219 const Register Rx_lo = R0;
2220 const Register Rx_hi = R1;
2221 const Register Ry_lo = R2;
2222 const Register Ry_hi = R3;
2223
2224 __ mov(Ry_lo, R0_tos_lo);
2225 __ mov(Ry_hi, R1_tos_hi);
2226 __ pop_l(Rx_lo, Rx_hi);
2227
2228 if (unordered_result == 1) {
2229 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2230 } else {
2231 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
2232 }
2233 }
2234
2235 #else
2236
2237 if (is_float) {
2238 transition(ftos, itos);
2239 __ pop_f(S1_tmp);
2240 __ fcmps(S1_tmp, S0_tos);
2241 } else {
2242 transition(dtos, itos);
2243 __ pop_d(D1_tmp);
2244 __ fcmpd(D1_tmp, D0_tos);
2245 }
2246
2247 __ fmstat();
2248
2249 // comparison result | flag N | flag Z | flag C | flag V
2250 // "<" | 1 | 0 | 0 | 0
2251 // "==" | 0 | 1 | 1 | 0
2252 // ">" | 0 | 0 | 1 | 0
2253 // unordered | 0 | 0 | 1 | 1
2254
2255 if (unordered_result < 0) {
2256 __ mov(R0_tos, 1); // result == 1 if greater
2257 __ mvn(R0_tos, 0, lt); // result == -1 if less or unordered (N!=V)
2258 } else {
2259 __ mov(R0_tos, 1); // result == 1 if greater or unordered
2260 __ mvn(R0_tos, 0, mi); // result == -1 if less (N=1)
2261 }
2262 __ mov(R0_tos, 0, eq); // result == 0 if equ (Z=1)
2263 #endif // __SOFTFP__
2264 #endif // AARCH64
2265 }
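// unordered_result encodes the bytecode flavor: fcmpg/dcmpg push 1 when
// either operand is NaN (unordered_result == 1), while fcmpl/dcmpl
// push -1 (unordered_result == -1), as required by the JVM specification.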
2266
2267
2268 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2269
2270 const Register Rdisp = R0_tmp;
2271 const Register Rbumped_taken_count = R5_tmp;
2272
2273 __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2274
2275 const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2276 InvocationCounter::counter_offset();
2277 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2278 InvocationCounter::counter_offset();
2279 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2280
2281 // Load up R0 with the branch displacement
2282 if (is_wide) {
2283 __ ldrsb(R0_tmp, at_bcp(1));
2284 __ ldrb(R1_tmp, at_bcp(2));
2285 __ ldrb(R2_tmp, at_bcp(3));
2286 __ ldrb(R3_tmp, at_bcp(4));
2287 __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2288 __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2289 __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2290 } else {
2291 __ ldrsb(R0_tmp, at_bcp(1));
2292 __ ldrb(R1_tmp, at_bcp(2));
2293 __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2294 }
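  // Rdisp now holds the signed branch displacement: 16-bit (branchbyte1/2)
  // for the narrow branches, 32-bit for goto_w/jsr_w. The initial ldrsb
  // sign-extends the most significant byte, so the assembled big-endian
  // value carries the correct sign.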
2295
2296 // Handle all the JSR stuff here, then exit.
2297 // It's much shorter and cleaner than intermingling with the
2298 // non-JSR normal-branch stuff occurring below.
2299 if (is_jsr) {
2300 // compute return address as bci in R1
2301 const Register Rret_addr = R1_tmp;
2302 assert_different_registers(Rdisp, Rret_addr, Rtemp);
2303
2304 __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2305 __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2306 __ sub(Rret_addr, Rret_addr, Rtemp);
2307
2308 // Load the next target bytecode into R3_bytecode and advance Rbcp
2309 #ifdef AARCH64
2310 __ add(Rbcp, Rbcp, Rdisp);
2311 __ ldrb(R3_bytecode, Address(Rbcp));
2312 #else
2313 __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2314 #endif // AARCH64
2315
2316 // Push return address
2317 __ push_i(Rret_addr);
2318 // jsr returns vtos
2319 __ dispatch_only_noverify(vtos);
2320 return;
2321 }
2322
2323 // Normal (non-jsr) branch handling
2324
2325 // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2326 #ifdef AARCH64
2327 __ add(Rbcp, Rbcp, Rdisp);
2328 __ ldrb(R3_bytecode, Address(Rbcp));
2329 #else
2330 __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2331 #endif // AARCH64
2332
2333 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2334 Label backedge_counter_overflow;
2335 Label profile_method;
2336 Label dispatch;
2337
2338 if (UseLoopCounter) {
2339 // increment backedge counter for backward branches
2340 // Rdisp (R0): target offset
2341
2342 const Register Rcnt = R2_tmp;
2343 const Register Rcounters = R1_tmp;
2344
2345 // count only if backward branch
2346 #ifdef AARCH64
2347 __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM
2348 #else
2349 __ tst(Rdisp, Rdisp);
2350 __ b(dispatch, pl);
2351 #endif // AARCH64
2352
2353 if (TieredCompilation) {
2354 Label no_mdo;
2355 int increment = InvocationCounter::count_increment;
2356 if (ProfileInterpreter) {
2357 // Are we profiling?
2358 __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2359 __ cbz(Rtemp, no_mdo);
2360 // Increment the MDO backedge counter
2361 const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2362 in_bytes(InvocationCounter::counter_offset()));
2363 const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2364 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2365 Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2366 __ b(dispatch);
2367 }
2368 __ bind(no_mdo);
2369 // Increment backedge counter in MethodCounters*
2370 // Note: Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on ARM64
2371 __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2372 Rdisp, R3_bytecode,
2373 AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2374 const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2375 __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2376 Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2377 } else { // not TieredCompilation
2378 // Increment backedge counter in MethodCounters*
2379 __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2380 Rdisp, R3_bytecode,
2381 AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
2382 __ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter
2383 __ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter
2384 __ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter
2385
2386 __ ldr_u32(Rcnt, Address(Rcounters, inv_offset)); // load invocation counter
2387 #ifdef AARCH64
2388 __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value); // and the status bits
2389 #else
2390 __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value); // and the status bits
2391 #endif // AARCH64
2392 __ add(Rcnt, Rcnt, Rtemp); // add both counters
2393
2394 if (ProfileInterpreter) {
2395 // Test to see if we should create a method data oop
2396 const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2397 __ ldr_s32(Rtemp, profile_limit);
2398 __ cmp_32(Rcnt, Rtemp);
2399 __ b(dispatch, lt);
2400
2401 // if no method data exists, go to profile method
2402 __ test_method_data_pointer(R4_tmp, profile_method);
2403
2404 if (UseOnStackReplacement) {
2405 // check for overflow against Rbumped_taken_count, which is the MDO taken count
2406 const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2407 __ ldr_s32(Rtemp, backward_branch_limit);
2408 __ cmp(Rbumped_taken_count, Rtemp);
2409 __ b(dispatch, lo);
2410
2411 // When ProfileInterpreter is on, the backedge_count comes from the
2412 // MethodData*, whose value does not get reset on the call to
2413 // frequency_counter_overflow(). To avoid excessive calls to the overflow
2414 // routine while the method is being compiled, add a second test to make
2415 // sure the overflow function is called only once every overflow_frequency.
2416 const int overflow_frequency = 1024;
2417
2418 #ifdef AARCH64
2419 __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1));
2420 #else
2421 // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2422 assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
2423 __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2424 #endif // AARCH64
2425
2426 __ b(backedge_counter_overflow, eq);
2427 }
2428 } else {
2429 if (UseOnStackReplacement) {
2430 // check for overflow against Rcnt, which is the sum of the counters
2431 const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2432 __ ldr_s32(Rtemp, backward_branch_limit);
2433 __ cmp_32(Rcnt, Rtemp);
2434 __ b(backedge_counter_overflow, hs);
2435
2436 }
2437 }
2438 }
2439 __ bind(dispatch);
2440 }
2441
2442 if (!UseOnStackReplacement) {
2443 __ bind(backedge_counter_overflow);
2444 }
2445
2446 // continue with the bytecode @ target
2447 __ dispatch_only(vtos);
2448
2449 if (UseLoopCounter) {
2450 if (ProfileInterpreter && !TieredCompilation) {
2451 // Out-of-line code to allocate method data oop.
2452 __ bind(profile_method);
2453
2454 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2455 __ set_method_data_pointer_for_bcp();
2456 // reload next bytecode
2457 __ ldrb(R3_bytecode, Address(Rbcp));
2458 __ b(dispatch);
2459 }
2460
2461 if (UseOnStackReplacement) {
2462 // invocation counter overflow
2463 __ bind(backedge_counter_overflow);
2464
2465 __ sub(R1, Rbcp, Rdisp); // branch bcp
2466 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2467
2468 // R0: osr nmethod (osr ok) or NULL (osr not possible)
2469 const Register Rnmethod = R0;
2470
2471 __ ldrb(R3_bytecode, Address(Rbcp)); // reload next bytecode
2472
2473 __ cbz(Rnmethod, dispatch); // test result, no osr if null
2474
2475 // nmethod may have been invalidated (VM may block upon call_VM return)
2476 __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2477 __ cmp(R1_tmp, nmethod::in_use);
2478 __ b(dispatch, ne);
2479
2480 // We have the address of an on stack replacement routine in Rnmethod,
2481 // We need to prepare to execute the OSR method. First we must
2482 // migrate the locals and monitors off of the stack.
2483
2484 __ mov(Rtmp_save0, Rnmethod); // save the nmethod
2485
2486 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2487
2488 // R0 is OSR buffer
2489
2490 __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2491 __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2492
2493 #ifdef AARCH64
2494 __ ldp(FP, LR, Address(FP));
2495 __ mov(SP, Rtemp);
2496 #else
2497 __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2498 __ bic(SP, Rtemp, StackAlignmentInBytes - 1); // Remove frame and align stack
2499 #endif // AARCH64
2500
2501 __ jump(R1_tmp);
2502 }
2503 }
2504 }
2505
2506
2507 void TemplateTable::if_0cmp(Condition cc) {
2508 transition(itos, vtos);
2509 // assume branch is more often taken than not (loops use backward branches)
2510 Label not_taken;
2511 #ifdef AARCH64
2512 if (cc == equal) {
2513 __ cbnz_w(R0_tos, not_taken);
2514 } else if (cc == not_equal) {
2515 __ cbz_w(R0_tos, not_taken);
2516 } else {
2517 __ cmp_32(R0_tos, 0);
2518 __ b(not_taken, convNegCond(cc));
2519 }
2520 #else
2521 __ cmp_32(R0_tos, 0);
2522 __ b(not_taken, convNegCond(cc));
2523 #endif // AARCH64
2524 branch(false, false);
2525 __ bind(not_taken);
2526 __ profile_not_taken_branch(R0_tmp);
2527 }
2528
2529
2530 void TemplateTable::if_icmp(Condition cc) {
2531 transition(itos, vtos);
2532 // assume branch is more often taken than not (loops use backward branches)
2533 Label not_taken;
2534 __ pop_i(R1_tmp);
2535 __ cmp_32(R1_tmp, R0_tos);
2536 __ b(not_taken, convNegCond(cc));
2537 branch(false, false);
2538 __ bind(not_taken);
2539 __ profile_not_taken_branch(R0_tmp);
2540 }
2541
2542
2543 void TemplateTable::if_nullcmp(Condition cc) {
2544 transition(atos, vtos);
2545 assert(cc == equal || cc == not_equal, "invalid condition");
2546
2547 // assume branch is more often taken than not (loops use backward branches)
2548 Label not_taken;
2549 if (cc == equal) {
2550 __ cbnz(R0_tos, not_taken);
2551 } else {
2552 __ cbz(R0_tos, not_taken);
2553 }
2554 branch(false, false);
2555 __ bind(not_taken);
2556 __ profile_not_taken_branch(R0_tmp);
2557 }
2558
2559
2560 void TemplateTable::if_acmp(Condition cc) {
2561 transition(atos, vtos);
2562 // assume branch is more often taken than not (loops use backward branches)
2563 Label not_taken;
2564 __ pop_ptr(R1_tmp);
2565 __ cmp(R1_tmp, R0_tos);
2566 __ b(not_taken, convNegCond(cc));
2567 branch(false, false);
2568 __ bind(not_taken);
2569 __ profile_not_taken_branch(R0_tmp);
2570 }
2571
2572
2573 void TemplateTable::ret() {
2574 transition(vtos, vtos);
2575 const Register Rlocal_index = R1_tmp;
2576 const Register Rret_bci = Rtmp_save0; // R4/R19
2577
2578 locals_index(Rlocal_index);
2579 Address local = load_iaddress(Rlocal_index, Rtemp);
2580 __ ldr_s32(Rret_bci, local); // get return bci, compute return bcp
2581 __ profile_ret(Rtmp_save1, Rret_bci);
2582 __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2583 __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2584 __ add(Rbcp, Rtemp, Rret_bci);
2585 __ dispatch_next(vtos);
2586 }
2587
2588
2589 void TemplateTable::wide_ret() {
2590 transition(vtos, vtos);
2591 const Register Rlocal_index = R1_tmp;
2592 const Register Rret_bci = Rtmp_save0; // R4/R19
2593
2594 locals_index_wide(Rlocal_index);
2595 Address local = load_iaddress(Rlocal_index, Rtemp);
2596 __ ldr_s32(Rret_bci, local); // get return bci, compute return bcp
2597 __ profile_ret(Rtmp_save1, Rret_bci);
2598 __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2599 __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2600 __ add(Rbcp, Rtemp, Rret_bci);
2601 __ dispatch_next(vtos);
2602 }
2603
2604
2605 void TemplateTable::tableswitch() {
2606 transition(itos, vtos);
2607
2608 const Register Rindex = R0_tos;
2609 #ifndef AARCH64
2610 const Register Rtemp2 = R1_tmp;
2611 #endif // !AARCH64
2612 const Register Rabcp = R2_tmp; // aligned bcp
2613 const Register Rlow = R3_tmp;
2614 const Register Rhigh = R4_tmp;
2615 const Register Roffset = R5_tmp;
2616
2617 // align bcp
2618 __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2619 __ align_reg(Rabcp, Rtemp, BytesPerInt);
2620
2621 // load lo & hi
2622 #ifdef AARCH64
2623 __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed));
2624 #else
2625 __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2626 #endif // AARCH64
2627 __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2628 __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
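  // Layout of the 4-byte-aligned tableswitch operands: default offset,
  // low, high, then (high - low + 1) jump offsets. After the load above,
  // Rabcp points at the jump offsets, so the default offset sits at
  // Rabcp - 3*BytesPerInt.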
2629
2630 // compare index with high bound
2631 __ cmp_32(Rhigh, Rindex);
2632
2633 #ifdef AARCH64
2634 Label default_case, do_dispatch;
2635 __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge);
2636 __ b(default_case, lt);
2637
2638 __ sub_w(Rindex, Rindex, Rlow);
2639 __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt));
2640 if(ProfileInterpreter) {
2641 __ sxtw(Rindex, Rindex);
2642 __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2643 }
2644 __ b(do_dispatch);
2645
2646 __ bind(default_case);
2647 __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt));
2648 if(ProfileInterpreter) {
2649 __ profile_switch_default(R0_tmp);
2650 }
2651
2652 __ bind(do_dispatch);
2653 #else
2654
2655 // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2656 __ subs(Rindex, Rindex, Rlow, ge);
2657
2658 // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2659 // ("ge" status accumulated from cmp and subs instructions) then load
2660 // offset from table, otherwise load offset for default case
2661
2662 if(ProfileInterpreter) {
2663 Label default_case, continue_execution;
2664
2665 __ b(default_case, lt);
2666 __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2667 __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2668 __ b(continue_execution);
2669
2670 __ bind(default_case);
2671 __ profile_switch_default(R0_tmp);
2672 __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2673
2674 __ bind(continue_execution);
2675 } else {
2676 __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2677 __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2678 }
2679 #endif // AARCH64
2680
2681 __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2682
2683 // load the next bytecode to R3_bytecode and advance Rbcp
2684 #ifdef AARCH64
2685 __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2686 __ ldrb(R3_bytecode, Address(Rbcp));
2687 #else
2688 __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2689 #endif // AARCH64
2690 __ dispatch_only(vtos);
2691
2692 }
2693
2694
2695 void TemplateTable::lookupswitch() {
2696 transition(itos, itos);
2697 __ stop("lookupswitch bytecode should have been rewritten");
2698 }
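// The bytecode rewriter replaces lookupswitch with fast_linearswitch or
// fast_binaryswitch (depending on the number of match pairs), so the
// template above should never be executed.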
2699
2700
2701 void TemplateTable::fast_linearswitch() {
2702 transition(itos, vtos);
2703 Label loop, found, default_case, continue_execution;
2704
2705 const Register Rkey = R0_tos;
2706 const Register Rabcp = R2_tmp; // aligned bcp
2707 const Register Rdefault = R3_tmp;
2708 const Register Rcount = R4_tmp;
2709 const Register Roffset = R5_tmp;
2710
2711 // bswap Rkey, so we can avoid bswapping the table entries
2712 __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2713
2714 // align bcp
2715 __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2716 __ align_reg(Rabcp, Rtemp, BytesPerInt);
2717
2718 // load default & counter
2719 #ifdef AARCH64
2720 __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed));
2721 #else
2722 __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2723 #endif // AARCH64
2724 __ byteswap_u32(Rcount, R1_tmp, Rtemp);
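  // Operand layout after alignment: default offset, npairs, then npairs
  // (match, offset) pairs. The loop below scans the match words with
  // post-indexed loads stepping 2*BytesPerInt per pair; on a hit the
  // matching offset is one word behind the updated Rabcp.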
2725
2726 #ifdef AARCH64
2727 __ cbz_w(Rcount, default_case);
2728 #else
2729 __ cmp_32(Rcount, 0);
2730 __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2731 __ b(default_case, eq);
2732 #endif // AARCH64
2733
2734 // table search
2735 __ bind(loop);
2736 #ifdef AARCH64
2737 __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed));
2738 #endif // AARCH64
2739 __ cmp_32(Rtemp, Rkey);
2740 __ b(found, eq);
2741 __ subs(Rcount, Rcount, 1);
2742 #ifndef AARCH64
2743 __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2744 #endif // !AARCH64
2745 __ b(loop, ne);
2746
2747 // default case
2748 __ bind(default_case);
2749 __ profile_switch_default(R0_tmp);
2750 __ mov(Roffset, Rdefault);
2751 __ b(continue_execution);
2752
2753 // entry found -> get offset
2754 __ bind(found);
2755 // Rabcp is already incremented and points to the next entry
2756 __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2757 if (ProfileInterpreter) {
2758 // Calculate index of the selected case.
2759 assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2760
2761 // align bcp
2762 __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2763 __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2764
2765 // load number of cases
2766 __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2767 __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2768
2769 // Selected index = <number of cases> - <current loop count>
2770 __ sub(R1_tmp, R2_tmp, Rcount);
2771 __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2772 }
2773
2774 // continue execution
2775 __ bind(continue_execution);
2776 __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2777
2778 // load the next bytecode to R3_bytecode and advance Rbcp
2779 #ifdef AARCH64
2780 __ add(Rbcp, Rbcp, Roffset, ex_sxtw);
2781 __ ldrb(R3_bytecode, Address(Rbcp));
2782 #else
2783 __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2784 #endif // AARCH64
2785 __ dispatch_only(vtos);
2786 }
2787
2788
2789 void TemplateTable::fast_binaryswitch() {
2790 transition(itos, vtos);
2791 // Implementation using the following core algorithm:
2792 //
2793 // int binary_search(int key, LookupswitchPair* array, int n) {
2794 // // Binary search according to "Methodik des Programmierens" by
2795 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2796 // int i = 0;
2797 // int j = n;
2798 // while (i+1 < j) {
2799 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2800 // // with Q: for all i: 0 <= i < n: key < a[i]
2801 // // where a stands for the array and assuming that the (nonexistent)
2802 // // element a[n] is infinitely big.
2803 // int h = (i + j) >> 1;
2804 // // i < h < j
2805 // if (key < array[h].fast_match()) {
2806 // j = h;
2807 // } else {
2808 // i = h;
2809 // }
2810 // }
2811 // // R: a[i] <= key < a[i+1] or Q
2812 // // (i.e., if key is within array, i is the correct index)
2813 // return i;
2814 // }
2815
2816 // register allocation
2817 const Register key = R0_tos; // already set (tosca)
2818 const Register array = R1_tmp;
2819 const Register i = R2_tmp;
2820 const Register j = R3_tmp;
2821 const Register h = R4_tmp;
2822 const Register val = R5_tmp;
2823 const Register temp1 = Rtemp;
2824 const Register temp2 = LR_tmp;
2825 const Register offset = R3_tmp;
2826
2827 // set 'array' = aligned bcp + 2 ints
2828 __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2829 __ align_reg(array, temp1, BytesPerInt);
2830
2831 // initialize i & j
2832 __ mov(i, 0); // i = 0;
2833 __ ldr_s32(j, Address(array, -BytesPerInt)); // j = length(array);
2834 // Convert j into native byteordering
2835 __ byteswap_u32(j, temp1, temp2);
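  // 'array' points past the default offset and npairs words; each
  // LookupswitchPair is a (match, offset) pair of 2*BytesPerInt bytes,
  // which is why indexing below scales by 1 + LogBytesPerInt.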
2836
2837 // and start
2838 Label entry;
2839 __ b(entry);
2840
2841 // binary search loop
2842 { Label loop;
2843 __ bind(loop);
2844 // int h = (i + j) >> 1;
2845 __ add(h, i, j); // h = i + j;
2846 __ logical_shift_right(h, h, 1); // h = (i + j) >> 1;
2847 // if (key < array[h].fast_match()) {
2848 // j = h;
2849 // } else {
2850 // i = h;
2851 // }
2852 #ifdef AARCH64
2853 __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt));
2854 __ ldr_s32(val, Address(temp1));
2855 #else
2856 __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2857 #endif // AARCH64
2858 // Convert array[h].match to native byte-ordering before compare
2859 __ byteswap_u32(val, temp1, temp2);
2860 __ cmp_32(key, val);
2861 __ mov(j, h, lt); // j = h if (key < array[h].fast_match())
2862 __ mov(i, h, ge); // i = h if (key >= array[h].fast_match())
2863 // while (i+1 < j)
2864 __ bind(entry);
2865 __ add(temp1, i, 1); // i+1
2866 __ cmp(temp1, j); // i+1 < j
2867 __ b(loop, lt);
2868 }
2869
2870 // end of binary search, result index is i (must check again!)
2871 Label default_case;
2872 // Convert array[i].match to native byte-ordering before compare
2873 #ifdef AARCH64
2874 __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2875 __ ldr_s32(val, Address(temp1));
2876 #else
2877 __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2878 #endif // AARCH64
2879 __ byteswap_u32(val, temp1, temp2);
2880 __ cmp_32(key, val);
2881 __ b(default_case, ne);
2882
2883 // entry found
2884 __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2885 __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2886 __ profile_switch_case(R0, i, R1, i);
2887 __ byteswap_u32(offset, temp1, temp2);
2888 #ifdef AARCH64
2889 __ add(Rbcp, Rbcp, offset, ex_sxtw);
2890 __ ldrb(R3_bytecode, Address(Rbcp));
2891 #else
2892 __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2893 #endif // AARCH64
2894 __ dispatch_only(vtos);
2895
2896 // default case
2897 __ bind(default_case);
2898 __ profile_switch_default(R0);
2899 __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2900 __ byteswap_u32(offset, temp1, temp2);
2901 #ifdef AARCH64
2902 __ add(Rbcp, Rbcp, offset, ex_sxtw);
2903 __ ldrb(R3_bytecode, Address(Rbcp));
2904 #else
2905 __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2906 #endif // AARCH64
2907 __ dispatch_only(vtos);
2908 }
2909
2910
2911 void TemplateTable::_return(TosState state) {
2912 transition(state, state);
2913 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2914
2915 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2916 Label skip_register_finalizer;
2917 assert(state == vtos, "only valid state");
2918 __ ldr(R1, aaddress(0));
2919 __ load_klass(Rtemp, R1);
2920 __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2921 __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2922
2923 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2924
2925 __ bind(skip_register_finalizer);
2926 }
2927
2928 // Narrow result if state is itos but result type is smaller.
2929 // Need to narrow in the return bytecode rather than in generate_return_entry
2930 // since compiled code callers expect the result to already be narrowed.
2931 if (state == itos) {
2932 __ narrow(R0_tos);
2933 }
2934 __ remove_activation(state, LR);
2935
2936 __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2937
2938 #ifndef AARCH64
2939 // According to interpreter calling conventions, result is returned in R0/R1,
2940 // so ftos (S0) and dtos (D0) are moved to R0/R1.
2941 // This conversion should be done after remove_activation, as it uses
2942 // push(state) & pop(state) to preserve return value.
2943 __ convert_tos_to_retval(state);
2944 #endif // !AARCH64
2945
2946 __ ret();
2947
2948 __ nop(); // to avoid filling CPU pipeline with invalid instructions
2949 __ nop();
2950 }
2951
2952
2953 // ----------------------------------------------------------------------------
2954 // Volatile variables demand their effects be made known to all CPUs in
2955 // order. Store buffers on most chips allow reads & writes to reorder; the
2956 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2957 // memory barrier (i.e., it's not sufficient that the interpreter does not
2958 // reorder volatile references, the hardware also must not reorder them).
2959 //
2960 // According to the new Java Memory Model (JMM):
2961 // (1) All volatiles are serialized wrt to each other.
2962 // ALSO reads & writes act as acquire & release, so:
2963 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2964 // the read float up to before the read. It's OK for non-volatile memory refs
2965 // that happen before the volatile read to float down below it.
2966 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2967 // that happen BEFORE the write float down to after the write. It's OK for
2968 // non-volatile memory refs that happen after the volatile write to float up
2969 // before it.
2970 //
2971 // We only put in barriers around volatile refs (they are expensive), not
2972 // _between_ memory refs (that would require us to track the flavor of the
2973 // previous memory refs). Requirements (2) and (3) require some barriers
2974 // before volatile stores and after volatile loads. These nearly cover
2975 // requirement (1) but miss the volatile-store-volatile-load case. This final
2976 // case is placed after volatile-stores although it could just as well go
2977 // before volatile-loads.
2978 // TODO-AARCH64: consider removing extra unused parameters
2979 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2980 Register tmp,
2981 bool preserve_flags,
2982 Register load_tgt) {
2983 #ifdef AARCH64
2984 __ membar(order_constraint);
2985 #else
2986 __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2987 #endif
2988 }
2989
2990 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
2991 void TemplateTable::resolve_cache_and_index(int byte_no,
2992 Register Rcache,
2993 Register Rindex,
2994 size_t index_size) {
2995 assert_different_registers(Rcache, Rindex, Rtemp);
2996
2997 Label resolved;
2998 Bytecodes::Code code = bytecode();
2999 switch (code) {
3000 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
3001 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
3002 }
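  // The constant pool cache records the base bytecode, so the _nofast
  // variants are mapped back to it before the resolved-state comparison
  // below.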
3003
3004 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
3005 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
3006 __ cmp(Rtemp, code); // have we resolved this bytecode?
3007 __ b(resolved, eq);
3008
3009 // resolve first time through
3010 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
3011 __ mov(R1, code);
3012 __ call_VM(noreg, entry, R1);
3013 // Update registers with resolved info
3014 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
3015 __ bind(resolved);
3016 }
3017
3018
3019 // The Rcache and Rindex registers must be set before the call
3020 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
3021 Register Rindex,
3022 Register Roffset,
3023 Register Rflags,
3024 Register Robj,
3025 bool is_static = false) {
3026
3027 assert_different_registers(Rcache, Rindex, Rtemp);
3028 assert_different_registers(Roffset, Rflags, Robj, Rtemp);
3029
3030 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3031
3032 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3033
3034 // Field offset
3035 __ ldr(Roffset, Address(Rtemp,
3036 cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
3037
3038 // Flags
3039 __ ldr_u32(Rflags, Address(Rtemp,
3040 cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3041
3042 if (is_static) {
3043 __ ldr(Robj, Address(Rtemp,
3044 cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
3045 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3046 __ ldr(Robj, Address(Robj, mirror_offset));
3047 __ resolve_oop_handle(Robj);
3048 }
3049 }
3050
3051
3052 // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
3053 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
3054 Register method,
3055 Register itable_index,
3056 Register flags,
3057 bool is_invokevirtual,
3058 bool is_invokevfinal/*unused*/,
3059 bool is_invokedynamic) {
3060 // setup registers
3061 const Register cache = R2_tmp;
3062 const Register index = R3_tmp;
3063 const Register temp_reg = Rtemp;
3064 assert_different_registers(cache, index, temp_reg);
3065 assert_different_registers(method, itable_index, temp_reg);
3066
3067 // determine constant pool cache field offsets
3068 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
3069 const int method_offset = in_bytes(
3070 ConstantPoolCache::base_offset() +
3071 ((byte_no == f2_byte)
3072 ? ConstantPoolCacheEntry::f2_offset()
3073 : ConstantPoolCacheEntry::f1_offset()
3074 )
3075 );
3076 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
3077 ConstantPoolCacheEntry::flags_offset());
3078 // access constant pool cache fields
3079 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
3080 ConstantPoolCacheEntry::f2_offset());
3081
3082 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
3083 resolve_cache_and_index(byte_no, cache, index, index_size);
3084 __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
3085 __ ldr(method, Address(temp_reg, method_offset));
3086
3087 if (itable_index != noreg) {
3088 __ ldr(itable_index, Address(temp_reg, index_offset));
3089 }
3090 __ ldr_u32(flags, Address(temp_reg, flags_offset));
3091 }
3092
3093
3094 // The cache and index registers are expected to be set before the call, and should not be Rtemp.
3095 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3096 // except cache and index registers which are preserved.
3097 void TemplateTable::jvmti_post_field_access(Register Rcache,
3098 Register Rindex,
3099 bool is_static,
3100 bool has_tos) {
3101 assert_different_registers(Rcache, Rindex, Rtemp);
3102
3103 if (__ can_post_field_access()) {
3104 // Check to see if a field access watch has been set before we take
3105 // the time to call into the VM.
3106
3107 Label Lcontinue;
3108
3109 __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
3110 __ cbz(Rtemp, Lcontinue);
3111
3112 // cache entry pointer
3113 __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3114 __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
3115 if (is_static) {
3116 __ mov(R1, 0); // NULL object reference
3117 } else {
3118 __ pop(atos); // Get the object
3119 __ mov(R1, R0_tos);
3120 __ verify_oop(R1);
3121 __ push(atos); // Restore stack state
3122 }
3123 // R1: object pointer or NULL
3124 // R2: cache entry pointer
3125 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3126 R1, R2);
3127 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3128
3129 __ bind(Lcontinue);
3130 }
3131 }
3132
3133
3134 void TemplateTable::pop_and_check_object(Register r) {
3135 __ pop_ptr(r);
3136 __ null_check(r, Rtemp); // for field access must check obj.
3137 __ verify_oop(r);
3138 }
3139
3140
3141 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3142 transition(vtos, vtos);
3143
3144 const Register Roffset = R2_tmp;
3145 const Register Robj = R3_tmp;
3146 const Register Rcache = R4_tmp;
3147 const Register Rflagsav = Rtmp_save0; // R4/R19
3148 const Register Rindex = R5_tmp;
3149 const Register Rflags = R5_tmp;
3150
3151 const bool gen_volatile_check = os::is_MP();
3152
3153 resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3154 jvmti_post_field_access(Rcache, Rindex, is_static, false);
3155 load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3156
3157 if (gen_volatile_check) {
3158 __ mov(Rflagsav, Rflags);
3159 }
3160
3161 if (!is_static) pop_and_check_object(Robj);
3162
3163 Label Done, Lint, Ltable, shouldNotReachHere;
3164 Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3165
3166 // compute type
3167 __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3168 // Make sure we don't need to mask flags after the above shift
3169 ConstantPoolCacheEntry::verify_tos_state_shift();
3170
3171 // There are actually two versions of implementation of getfield/getstatic:
3172 //
3173 // 32-bit ARM:
3174 // 1) Table switch using add(PC,...) instruction (fast_version)
3175 // 2) Table switch using ldr(PC,...) instruction
3176 //
3177 // AArch64:
3178 // 1) Table switch using adr/add/br instructions (fast_version)
3179 // 2) Table switch using adr/ldr/br instructions
3180 //
3181 // The first version requires a fixed-size code block for each case and
3182 // cannot be used when RewriteBytecodes or VerifyOops
3183 // is enabled.
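  // Note: in the fast version on 32-bit ARM, 'add(PC, PC, ...)' relies on
  // PC reading as the address of the current instruction plus 8, so the
  // dispatch lands at the start of the selected fixed-size case block.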
3184
3185 // Size of fixed size code block for fast_version
3186 const int log_max_block_size = 2;
3187 const int max_block_size = 1 << log_max_block_size;
3188
3189 // Decide if fast version is enabled
3190 bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop;
3191
3192 // On 32-bit ARM atos and itos cases can be merged only for fast version, because
3193 // atos requires additional processing in slow version.
3194 // On AArch64 atos and itos cannot be merged.
3195 bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version);
3196
3197 assert(number_of_states == 10, "number of tos states should be equal to 10");
3198
3199 __ cmp(Rflags, itos);
3200 #ifdef AARCH64
3201 __ b(Lint, eq);
3202
3203 if(fast_version) {
3204 __ adr(Rtemp, Lbtos);
3205 __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
3206 __ br(Rtemp);
3207 } else {
3208 __ adr(Rtemp, Ltable);
3209 __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
3210 __ br(Rtemp);
3211 }
3212 #else
3213 if(atos_merged_with_itos) {
3214 __ cmp(Rflags, atos, ne);
3215 }
3216
3217 // table switch by type
3218 if(fast_version) {
3219 __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3220 } else {
3221 __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3222 }
3223
3224 // jump to itos/atos case
3225 __ b(Lint);
3226 #endif // AARCH64
3227
3228 // table with addresses for slow version
3229 if (fast_version) {
3230 // nothing to do
3231 } else {
3232 AARCH64_ONLY(__ align(wordSize));
3233 __ bind(Ltable);
3234 __ emit_address(Lbtos);
3235 __ emit_address(Lztos);
3236 __ emit_address(Lctos);
3237 __ emit_address(Lstos);
3238 __ emit_address(Litos);
3239 __ emit_address(Lltos);
3240 __ emit_address(Lftos);
3241 __ emit_address(Ldtos);
3242 __ emit_address(Latos);
3243 }
3244
3245 #ifdef ASSERT
3246 int seq = 0;
3247 #endif
3248 // btos
3249 {
3250 assert(btos == seq++, "btos has unexpected value");
3251 FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3252 __ bind(Lbtos);
3253 __ ldrsb(R0_tos, Address(Robj, Roffset));
3254 __ push(btos);
3255 // Rewrite bytecode to be faster
3256 if (!is_static && rc == may_rewrite) {
3257 patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3258 }
3259 __ b(Done);
3260 }
3261
3262 // ztos (same as btos for getfield)
3263 {
3264 assert(ztos == seq++, "ztos has unexpected value");
3265 FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3266 __ bind(Lztos);
3267 __ ldrsb(R0_tos, Address(Robj, Roffset));
3268 __ push(ztos);
3269 // Rewrite bytecode to be faster (use btos fast getfield)
3270 if (!is_static && rc == may_rewrite) {
3271 patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
3272 }
3273 __ b(Done);
3274 }
3275
3276 // ctos
3277 {
3278 assert(ctos == seq++, "ctos has unexpected value");
3279 FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3280 __ bind(Lctos);
3281 __ ldrh(R0_tos, Address(Robj, Roffset));
3282 __ push(ctos);
3283 if (!is_static && rc == may_rewrite) {
3284 patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
3285 }
3286 __ b(Done);
3287 }
3288
3289 // stos
3290 {
3291 assert(stos == seq++, "stos has unexpected value");
3292 FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3293 __ bind(Lstos);
3294 __ ldrsh(R0_tos, Address(Robj, Roffset));
3295 __ push(stos);
3296 if (!is_static && rc == may_rewrite) {
3297 patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
3298 }
3299 __ b(Done);
3300 }
3301
3302 // itos
3303 {
3304 assert(itos == seq++, "itos has unexpected value");
3305 FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3306 __ bind(Litos);
3307 __ b(shouldNotReachHere);
3308 }
3309
3310 // ltos
3311 {
3312 assert(ltos == seq++, "ltos has unexpected value");
3313 FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3314 __ bind(Lltos);
3315 #ifdef AARCH64
3316 __ ldr(R0_tos, Address(Robj, Roffset));
3317 #else
3318 __ add(Roffset, Robj, Roffset);
3319 __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
3320 #endif // AARCH64
3321 __ push(ltos);
3322 if (!is_static && rc == may_rewrite) {
3323 patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
3324 }
3325 __ b(Done);
3326 }
3327
3328 // ftos
3329 {
3330 assert(ftos == seq++, "ftos has unexpected value");
3331 FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3332 __ bind(Lftos);
3333 // floats and ints are placed on the stack in the same way, so
3334 // we can use push(itos) to transfer value without using VFP
3335 __ ldr_u32(R0_tos, Address(Robj, Roffset));
3336 __ push(itos);
3337 if (!is_static && rc == may_rewrite) {
3338 patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
3339 }
3340 __ b(Done);
3341 }
3342
3343 // dtos
3344 {
3345 assert(dtos == seq++, "dtos has unexpected value");
3346 FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3347 __ bind(Ldtos);
3348 // doubles and longs are placed on the stack in the same way, so
3349 // we can use push(ltos) to transfer value without using VFP
3350 #ifdef AARCH64
3351 __ ldr(R0_tos, Address(Robj, Roffset));
3352 #else
3353 __ add(Rtemp, Robj, Roffset);
3354 __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
3355 #endif // AARCH64
3356 __ push(ltos);
3357 if (!is_static && rc == may_rewrite) {
3358 patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
3359 }
3360 __ b(Done);
3361 }
3362
3363 // atos
3364 {
3365 assert(atos == seq++, "atos has unexpected value");
3366
3367 // atos case for AArch64 and slow version on 32-bit ARM
3368 if(!atos_merged_with_itos) {
3369 __ bind(Latos);
3370 do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3371 __ push(atos);
3372 // Rewrite bytecode to be faster
3373 if (!is_static && rc == may_rewrite) {
3374 patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
3375 }
3376 __ b(Done);
3377 }
3378 }
3379
3380 assert(vtos == seq++, "vtos has unexpected value");
3381
3382 __ bind(shouldNotReachHere);
3383 __ should_not_reach_here();
3384
3385 // itos and atos cases are frequent so it makes sense to move them out of table switch
3386 // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
3387
3388 __ bind(Lint);
3389 __ ldr_s32(R0_tos, Address(Robj, Roffset));
3390 __ push(itos);
3391 // Rewrite bytecode to be faster
3392 if (!is_static && rc == may_rewrite) {
3393 patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
3394 }
3395
3396 __ bind(Done);
3397
3398 if (gen_volatile_check) {
3399 // Check for volatile field
3400 Label notVolatile;
3401 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3402
3403 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3404
3405 __ bind(notVolatile);
3406 }
3407
3408 }
3409
3410 void TemplateTable::getfield(int byte_no) {
3411 getfield_or_static(byte_no, false);
3412 }
3413
3414 void TemplateTable::nofast_getfield(int byte_no) {
3415 getfield_or_static(byte_no, false, may_not_rewrite);
3416 }
3417
3418 void TemplateTable::getstatic(int byte_no) {
3419 getfield_or_static(byte_no, true);
3420 }
3421
3422
3423 // The cache and index registers are expected to be set before the call, and should not be R1 or Rtemp.
3424 // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
3425 // except cache and index registers which are preserved.
3426 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3427 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3428 assert_different_registers(Rcache, Rindex, R1, Rtemp);
3429
3430 if (__ can_post_field_modification()) {
3431 // Check to see if a field modification watch has been set before we take
3432 // the time to call into the VM.
3433 Label Lcontinue;
3434
3435 __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3436 __ cbz(Rtemp, Lcontinue);
3437
3438 if (is_static) {
3439 // Life is simple. Null out the object pointer.
3440 __ mov(R1, 0);
3441 } else {
3442 // Life is harder. The stack holds the value on top, followed by the object.
3443 // We don't know the size of the value, though; it could be one or two words
3444 // depending on its type. As a result, we must find the type to determine where
3445 // the object is.
3446
3447 __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3448 __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3449
3450 __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3451 // Make sure we don't need to mask Rtemp after the above shift
3452 ConstantPoolCacheEntry::verify_tos_state_shift();
3453
3454 __ cmp(Rtemp, ltos);
3455 __ cond_cmp(Rtemp, dtos, ne);
3456 #ifdef AARCH64
3457 __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2));
3458 __ mov(R1, Interpreter::expr_offset_in_bytes(1));
3459 __ mov(R1, Rtemp, eq);
3460 __ ldr(R1, Address(Rstack_top, R1));
3461 #else
3462 // two word value (ltos/dtos)
3463 __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3464
3465 // one word value (not ltos, dtos)
3466 __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3467 #endif // AARCH64
3468 }
3469
3470 // cache entry pointer
3471 __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3472 __ add(R2, R2, in_bytes(cp_base_offset));
3473
3474 // object (tos)
3475 __ mov(R3, Rstack_top);
3476
3477 // R1: object pointer set up above (NULL if static)
3478 // R2: cache entry pointer
3479 // R3: value object on the stack
3480 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3481 R1, R2, R3);
3482 __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3483
3484 __ bind(Lcontinue);
3485 }
3486 }
3487
3488
3489 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3490 transition(vtos, vtos);
3491
3492 const Register Roffset = R2_tmp;
3493 const Register Robj = R3_tmp;
3494 const Register Rcache = R4_tmp;
3495 const Register Rflagsav = Rtmp_save0; // R4/R19
3496 const Register Rindex = R5_tmp;
3497 const Register Rflags = R5_tmp;
3498
3499 const bool gen_volatile_check = os::is_MP();
3500
3501 resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3502 jvmti_post_field_mod(Rcache, Rindex, is_static);
3503 load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3504
3505 if (gen_volatile_check) {
3506 // Check for volatile field
3507 Label notVolatile;
3508 __ mov(Rflagsav, Rflags);
3509 __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3510
3511 volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3512
3513 __ bind(notVolatile);
3514 }
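
  // A note on the barrier scheme used here and after Done (a sketch, not
  // normative): for a volatile store the JSR-133 cookbook recipe is
  //   StoreStore|LoadStore ; str ; StoreLoad
  // and for a final field store only a StoreStore is needed before the
  // object becomes reachable. On AArch64 the same effect could be obtained
  // with stlr (store-release) plus a trailing StoreLoad, which is what the
  // TODO-AARCH64 comments elsewhere in this file allude to.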

  Label Done, Lint, shouldNotReachHere;
  Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;

  // compute type
  __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // There are actually two versions of the putfield/putstatic implementation:
  //
  // 32-bit ARM:
  //   1) Table switch using an add(PC,...) instruction (fast_version)
  //   2) Table switch using an ldr(PC,...) instruction
  //
  // AArch64:
  //   1) Table switch using adr/add/br instructions (fast_version)
  //   2) Table switch using adr/ldr/br instructions
  //
  // The first version requires a fixed-size code block for each case and
  // cannot be used when RewriteBytecodes or VerifyOops is enabled.
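
  // How the fast table switch computes its target (a minimal sketch with
  // illustrative numbers): every case occupies exactly max_block_size
  // instructions, so the branch target is
  //
  //   target = first_case + (tos_state << (log_max_block_size + LogInstructionSize))
  //
  // e.g. with max_block_size = 8 and 4-byte instructions, ftos (tos state 6)
  // lands 6*32 bytes past the first case. The slow version instead loads the
  // target address from the Ltable word array indexed by tos_state.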

  // Size of fixed size code block for fast_version (in instructions)
  const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3);
  const int max_block_size = 1 << log_max_block_size;

  // Decide if fast version is enabled
  bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits;

  assert(number_of_states == 10, "number of tos states should be equal to 10");

  // itos case is frequent and is moved outside table switch
  __ cmp(Rflags, itos);

#ifdef AARCH64
  __ b(Lint, eq);

  if (fast_version) {
    __ adr(Rtemp, Lbtos);
    __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize));
    __ br(Rtemp);
  } else {
    __ adr(Rtemp, Ltable);
    __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags));
    __ br(Rtemp);
  }
#else
  // table switch by type
  if (fast_version) {
    __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
  } else {
    __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
  }

  // jump to itos case
  __ b(Lint);
#endif // AARCH64

  // table with addresses for slow version
  if (fast_version) {
    // nothing to do
  } else {
    AARCH64_ONLY(__ align(wordSize));
    __ bind(Ltable);
    __ emit_address(Lbtos);
    __ emit_address(Lztos);
    __ emit_address(Lctos);
    __ emit_address(Lstos);
    __ emit_address(Litos);
    __ emit_address(Lltos);
    __ emit_address(Lftos);
    __ emit_address(Ldtos);
    __ emit_address(Latos);
  }

#ifdef ASSERT
  int seq = 0;
#endif
  // btos
  {
    assert(btos == seq++, "btos has unexpected value");
    FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
    __ bind(Lbtos);
    __ pop(btos);
    if (!is_static) pop_and_check_object(Robj);
    __ strb(R0_tos, Address(Robj, Roffset));
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
    }
    __ b(Done);
  }

  // ztos
  {
    assert(ztos == seq++, "ztos has unexpected value");
    FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
    __ bind(Lztos);
    __ pop(ztos);
    if (!is_static) pop_and_check_object(Robj);
    __ and_32(R0_tos, R0_tos, 1);
    __ strb(R0_tos, Address(Robj, Roffset));
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
    }
    __ b(Done);
  }

  // ctos
  {
    assert(ctos == seq++, "ctos has unexpected value");
    FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
    __ bind(Lctos);
    __ pop(ctos);
    if (!is_static) pop_and_check_object(Robj);
    __ strh(R0_tos, Address(Robj, Roffset));
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
    }
    __ b(Done);
  }

  // stos
  {
    assert(stos == seq++, "stos has unexpected value");
    FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
    __ bind(Lstos);
    __ pop(stos);
    if (!is_static) pop_and_check_object(Robj);
    __ strh(R0_tos, Address(Robj, Roffset));
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
    }
    __ b(Done);
  }

  // itos
  {
    assert(itos == seq++, "itos has unexpected value");
    FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
    __ bind(Litos);
    __ b(shouldNotReachHere);
  }

  // ltos
  {
    assert(ltos == seq++, "ltos has unexpected value");
    FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
    __ bind(Lltos);
    __ pop(ltos);
    if (!is_static) pop_and_check_object(Robj);
#ifdef AARCH64
    __ str(R0_tos, Address(Robj, Roffset));
#else
    __ add(Roffset, Robj, Roffset);
    __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
#endif // AARCH64
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
    }
    __ b(Done);
  }

  // ftos
  {
    assert(ftos == seq++, "ftos has unexpected value");
    FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
    __ bind(Lftos);
    // floats and ints are placed on stack in the same way, so
    // we can use pop(itos) to transfer value without using VFP
    __ pop(itos);
    if (!is_static) pop_and_check_object(Robj);
    __ str_32(R0_tos, Address(Robj, Roffset));
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
    }
    __ b(Done);
  }

  // dtos
  {
    assert(dtos == seq++, "dtos has unexpected value");
    FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
    __ bind(Ldtos);
    // doubles and longs are placed on stack in the same way, so
    // we can use pop(ltos) to transfer value without using VFP
    __ pop(ltos);
    if (!is_static) pop_and_check_object(Robj);
#ifdef AARCH64
    __ str(R0_tos, Address(Robj, Roffset));
#else
    __ add(Rtemp, Robj, Roffset);
    __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
#endif // AARCH64
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
    }
    __ b(Done);
  }

  // atos
  {
    assert(atos == seq++, "atos has unexpected value");
    __ bind(Latos);
    __ pop(atos);
    if (!is_static) pop_and_check_object(Robj);
    // Store into the field
    do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(shouldNotReachHere);
  __ should_not_reach_here();

  // itos case is frequent and is moved outside table switch
  __ bind(Lint);
  __ pop(itos);
  if (!is_static) pop_and_check_object(Robj);
  __ str_32(R0_tos, Address(Robj, Roffset));
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
  }

  __ bind(Done);

  if (gen_volatile_check) {
    Label notVolatile;
    if (is_static) {
      // Just check for volatile. Memory barrier for static final field
      // is handled by class initialization.
      __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
      volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
      __ bind(notVolatile);
    } else {
      // Check for volatile field and final field
      Label skipMembar;

      __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
                       1 << ConstantPoolCacheEntry::is_final_shift);
      __ b(skipMembar, eq);

      __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);

      // StoreLoad barrier after volatile field write
      volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
      __ b(skipMembar);

      // StoreStore barrier after final field write
      __ bind(notVolatile);
      volatile_barrier(MacroAssembler::StoreStore, Rtemp);

      __ bind(skipMembar);
    }
  }

}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}


void TemplateTable::jvmti_post_fast_field_mod() {
  // This version of jvmti_post_fast_field_mod() is not used on ARM
  Unimplemented();
}

// Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
// but preserves tosca with the given state.
void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
  if (__ can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label done;

    __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
    __ cbz(R2, done);

    __ pop_ptr(R3);  // copy the object pointer from tos
    __ verify_oop(R3);
    __ push_ptr(R3); // put the object pointer back on tos

    __ push(state);  // save value on the stack

    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(R2, R1, 1);

    __ mov(R1, R3);
    assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
    __ mov(R3, Rstack_top); // put tos addr into R3

    // R1: object pointer copied above
    // R2: cache entry pointer
    // R3: jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);

    __ pop(state); // restore value

    __ bind(done);
  }
}


void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);

  ByteSize base = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod(state);

  const Register Rcache  = R2_tmp;
  const Register Rindex  = R3_tmp;
  const Register Roffset = R3_tmp;
  const Register Rflags  = Rtmp_save0; // R4/R19
  const Register Robj    = R5_tmp;

  const bool gen_volatile_check = os::is_MP();

  // access constant pool cache
  __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);

  __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));

  if (gen_volatile_check) {
    // load flags to test volatile
    __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
  }

  // replace index with field offset from cache entry
  __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));

  if (gen_volatile_check) {
    // Check for volatile store
    Label notVolatile;
    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);

    // TODO-AARCH64: on AArch64, store-release instructions can be used to get rid of this explicit barrier
    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);

    __ bind(notVolatile);
  }

  // Get object from stack
  pop_and_check_object(Robj);

  // access field
  switch (bytecode()) {
    case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
                                     // fall through
    case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
#ifdef AARCH64
    case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
#else
    case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
                                     __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;

#ifdef __SOFTFP__
    case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
                                     __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
#else
    case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
                                     __ fsts(S0_tos, Address(Robj)); break;
    case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
                                     __ fstd(D0_tos, Address(Robj)); break;
#endif // __SOFTFP__
#endif // AARCH64

    case Bytecodes::_fast_aputfield:
      do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, false);
      break;

    default:
      ShouldNotReachHere();
  }

  if (gen_volatile_check) {
    Label notVolatile;
    Label skipMembar;
    __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
                   1 << ConstantPoolCacheEntry::is_final_shift);
    __ b(skipMembar, eq);

    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);

    // StoreLoad barrier after volatile field write
    volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
    __ b(skipMembar);

    // StoreStore barrier after final field write
    __ bind(notVolatile);
    volatile_barrier(MacroAssembler::StoreStore, Rtemp);

    __ bind(skipMembar);
  }
}


void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  // do the JVMTI work here to avoid disturbing the register state below
  if (__ can_post_field_access()) {
    // Check to see if a field access watch has been set before we take
    // the time to call into the VM.
    Label done;
    __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
    __ cbz(R2, done);
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
    __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
    __ verify_oop(R0_tos);
    __ mov(R1, R0_tos);
    // R1: object pointer copied above
    // R2: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
    __ pop_ptr(R0_tos);   // restore object pointer

    __ bind(done);
  }

  const Register Robj    = R0_tos;
  const Register Rcache  = R2_tmp;
  const Register Rflags  = R2_tmp;
  const Register Rindex  = R3_tmp;
  const Register Roffset = R3_tmp;

  const bool gen_volatile_check = os::is_MP();

  // access constant pool cache
  __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
  // replace index with field offset from cache entry
  __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));

  if (gen_volatile_check) {
    // load flags to test volatile
    __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  }

  __ verify_oop(Robj);
  __ null_check(Robj, Rtemp);

  // access field
  switch (bytecode()) {
    case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
#ifdef AARCH64
    case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
#else
    case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
                                     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
#ifdef __SOFTFP__
    case Bytecodes::_fast_fgetfield: __ ldr(R0_tos, Address(Robj, Roffset)); break;
    case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
                                     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
#else
    case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
    case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
#endif // __SOFTFP__
#endif // AARCH64
    case Bytecodes::_fast_agetfield: do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
    default:
      ShouldNotReachHere();
  }

  if (gen_volatile_check) {
    // Check for volatile load
    Label notVolatile;
    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);

    // TODO-AARCH64: on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);

    __ bind(notVolatile);
  }
}


void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  const Register Robj    = R1_tmp;
  const Register Rcache  = R2_tmp;
  const Register Rindex  = R3_tmp;
  const Register Roffset = R3_tmp;
  const Register Rflags  = R4_tmp;
  Label done;

  // get receiver
  __ ldr(Robj, aaddress(0));

  // access constant pool cache
  __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
  __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));

  const bool gen_volatile_check = os::is_MP();

  if (gen_volatile_check) {
    // load flags to test volatile
    __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  }

  // make sure exception is reported in correct bcp range (getfield is next instruction)
  __ add(Rbcp, Rbcp, 1);
  __ null_check(Robj, Rtemp);
  __ sub(Rbcp, Rbcp, 1);

#ifdef AARCH64
  if (gen_volatile_check) {
    Label notVolatile;
    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);

    __ add(Rtemp, Robj, Roffset);

    if (state == itos) {
      __ ldar_w(R0_tos, Rtemp);
    } else if (state == atos) {
      if (UseCompressedOops) {
        __ ldar_w(R0_tos, Rtemp);
        __ decode_heap_oop(R0_tos);
      } else {
        __ ldar(R0_tos, Rtemp);
      }
      __ verify_oop(R0_tos);
    } else if (state == ftos) {
      __ ldar_w(R0_tos, Rtemp);
      __ fmov_sw(S0_tos, R0_tos);
    } else {
      ShouldNotReachHere();
    }
    __ b(done);

    __ bind(notVolatile);
  }
#endif // AARCH64

  if (state == itos) {
    __ ldr_s32(R0_tos, Address(Robj, Roffset));
  } else if (state == atos) {
    do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
    __ verify_oop(R0_tos);
  } else if (state == ftos) {
#ifdef AARCH64
    __ ldr_s(S0_tos, Address(Robj, Roffset));
#else
#ifdef __SOFTFP__
    __ ldr(R0_tos, Address(Robj, Roffset));
#else
    __ add(Roffset, Robj, Roffset);
    __ flds(S0_tos, Address(Roffset));
#endif // __SOFTFP__
#endif // AARCH64
  } else {
    ShouldNotReachHere();
  }

#ifndef AARCH64
  if (gen_volatile_check) {
    // Check for volatile load
    Label notVolatile;
    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);

    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);

    __ bind(notVolatile);
  }
#endif // !AARCH64

  __ bind(done);
}



//----------------------------------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}


void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(recv  == noreg || recv  == R2, "");
  assert(flags == noreg || flags == R3, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = R2;
  if (flags == noreg)  flags = R3;
  const Register temp = Rtemp;
  const Register ret_type = R1_tmp;
  assert_different_registers(method, index, flags, recv, LR, ret_type, temp);

  // save 'interpreter return address'
  __ save_bcp();

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  // maybe push extra argument
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
    __ mov(temp, index);
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(index, temp);
    __ verify_oop(index);
    __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after extra argument is pushed so parameter size is correct)
  if (load_receiver) {
    __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask); // get parameter size
    Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
    __ ldr(recv, recv_addr);
    __ verify_oop(recv);
  }

  // compute return type
  __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
    __ mov_slow(temp, table);
    __ ldr(LR, Address::indexed_ptr(temp, ret_type));
  }
}
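
// A sketch of the return-address lookup done at the end of prepare_invoke
// (illustrative only): the interpreter keeps one return entry per tos state,
// so the loaded LR is effectively
//
//   LR = ((address*)Interpreter::invoke_return_entry_table_for(code))[ret_type];
//
// i.e. a plain word-indexed read of the per-bytecode return-entry table,
// selected by the callee's result type.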


void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags) {

  const Register recv_klass = R2_tmp;

  assert_different_registers(index, recv, flags, Rtemp);
  assert_different_registers(index, recv_klass, R0_tmp, Rtemp);

  // Test for an invoke of a final method
  Label notFinal;
  __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);

  assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");

  // do the call - the index is actually the method to call

  // It's final, need a null check here!
  __ null_check(recv, Rtemp);

  // profile this call
  __ profile_final_call(R0_tmp);

  __ jump_from_interpreted(Rmethod);

  __ bind(notFinal);

  // get receiver klass
  __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
  __ load_klass(recv_klass, recv);

  // profile this call
  __ profile_virtual_call(R0_tmp, recv_klass);

  // get target Method* & entry point
  const int base = in_bytes(Klass::vtable_start_offset());
  assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
  __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
  __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
  __ jump_from_interpreted(Rmethod);
}
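
// The vtable dispatch above, in pointer arithmetic (a minimal sketch, not
// the authoritative layout): with one word per vtableEntry, the target is
//
//   Rmethod = *(Method**)(recv_klass
//                         + Klass::vtable_start_offset()
//                         + index * wordSize
//                         + vtableEntry::method_offset_in_bytes());
//
// so, for example, vtable index 5 on a 32-bit VM reads the word 20 bytes
// past the start of the vtable (plus the method offset within the entry).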

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  const Register Rrecv  = R2_tmp;
  const Register Rflags = R3_tmp;

  prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);

  // Rmethod: index
  // Rrecv: receiver
  // Rflags: flags
  // LR: return address

  invokevirtual_helper(Rmethod, Rrecv, Rflags);
}


void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  const Register Rrecv = R2_tmp;
  prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
  __ verify_oop(Rrecv);
  __ null_check(Rrecv, Rtemp);
  // do the call
  __ profile_call(Rrecv);
  __ jump_from_interpreted(Rmethod);
}


void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, Rmethod);
  // do the call
  __ profile_call(R2_tmp);
  __ jump_from_interpreted(Rmethod);
}


void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  __ stop("fast_invokevfinal is not used on ARM");
}


void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Ritable = R1_tmp;
  const Register Rrecv   = R2_tmp;
  const Register Rinterf = R5_tmp;
  const Register Rindex  = R4_tmp;
  const Register Rflags  = R3_tmp;
  const Register Rklass  = R2_tmp; // Note! Same register with Rrecv

  prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);

  // First check for Object case, then private interface method,
  // then regular interface method.

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See cpCache.cpp for details.
  Label notObjectMethod;
  __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
  invokevirtual_helper(Rmethod, Rrecv, Rflags);
  __ bind(notObjectMethod);

  // Get receiver klass into Rklass - also a null check
  __ load_klass(Rklass, Rrecv);

  // Check for private method invocation - indicated by vfinal
  Label no_such_interface;

  Label notVFinal;
  __ tbz(Rflags, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);

  Label subtype;
  __ check_klass_subtype(Rklass, Rinterf, R1_tmp, R3_tmp, noreg, subtype);
  // If we get here the typecheck failed
  __ b(no_such_interface);
  __ bind(subtype);

  // do the call
  __ profile_final_call(R0_tmp);
  __ jump_from_interpreted(Rmethod);

  __ bind(notVFinal);

  // Receiver subtype check against REFC.
  __ lookup_interface_method(// inputs: rec. class, interface
                             Rklass, Rinterf, noreg,
                             // outputs: scan temp. reg1, scan temp. reg2
                             noreg, Ritable, Rtemp,
                             no_such_interface);

  // profile this call
  __ profile_virtual_call(R0_tmp, Rklass);

  // Get declaring interface class from method
  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
  __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));

  // Get itable index from method
  __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
  __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
  __ neg(Rindex, Rtemp);
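
  // The two instructions above compute, in effect (a sketch of the decoding,
  // assuming the usual encoding of itable indices in Method*):
  //
  //   Rindex = Method::itable_index_max - encoded_index
  //
  // written as add-then-negate only because the small negative constant
  // cannot be encoded as an arm32 immediate operand in a single sub.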

  __ lookup_interface_method(// inputs: rec. class, interface
                             Rklass, Rinterf, Rindex,
                             // outputs: scan temp. reg1, scan temp. reg2
                             Rmethod, Ritable, Rtemp,
                             no_such_interface);

  // Rmethod: Method* to call

  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  // interpreter entry point and a conditional jump to it in case of a null
  // method.
  { Label L;
    __ cbnz(Rmethod, L);
    // throw exception
    // note: must restore interpreter registers to canonical
    // state for exception handling to work correctly!
    __ restore_method();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    __ bind(L);
  }

  // do the call
  __ jump_from_interpreted(Rmethod);

  // throw exception
  __ bind(no_such_interface);
  __ restore_method();
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  // TODO-AARCH64: review register usage
  const Register Rrecv  = R2_tmp;
  const Register Rmtype = R4_tmp;
  const Register R5_method = R5_tmp; // can't reuse Rmethod!

  prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
  __ null_check(Rrecv, Rtemp);

  // Rmtype: MethodType object (from cpool->resolved_references[f1], if necessary)
  // R5_method: MH.invokeExact_MT method (from f2)

  // Note: Rmtype is already pushed (if necessary) by prepare_invoke

  // do the call
  __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
  __ mov(Rmethod, R5_method);
  __ jump_from_interpreted(Rmethod);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  // TODO-AARCH64: review register usage
  const Register Rcallsite = R4_tmp;
  const Register R5_method = R5_tmp; // can't reuse Rmethod!

  prepare_invoke(byte_no, R5_method, Rcallsite);

  // Rcallsite: CallSite object (from cpool->resolved_references[f1])
  // R5_method: MH.linkToCallSite method (from f2)

  // Note: Rcallsite is already pushed by prepare_invoke

  if (ProfileInterpreter) {
    __ profile_call(R2_tmp);
  }

  // do the call
  __ mov(Rmethod, R5_method);
  __ jump_from_interpreted(Rmethod);
}

//----------------------------------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  const Register Robj   = R0_tos;
  const Register Rcpool = R1_tmp;
  const Register Rindex = R2_tmp;
  const Register Rtags  = R3_tmp;
  const Register Rsize  = R3_tmp;

  Register Rklass = R4_tmp;
  assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
  assert_different_registers(Rcpool, Rindex, Rklass, Rsize);

  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object; // including clearing the fields

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc();

  // Literals
  InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);

  __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
  __ get_cpool_and_tags(Rcpool, Rtags);

  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the order
  // in which the ConstantPool is updated (see ConstantPool::klass_at_put).
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ add(Rtemp, Rtags, Rindex);

#ifdef AARCH64
  __ add(Rtemp, Rtemp, tags_offset);
  __ ldarb(Rtemp, Rtemp);
#else
  __ ldrb(Rtemp, Address(Rtemp, tags_offset));

  // use Rklass as a scratch
  volatile_barrier(MacroAssembler::LoadLoad, Rklass);
#endif // AARCH64

  // get InstanceKlass
  __ cmp(Rtemp, JVM_CONSTANT_Class);
  __ b(slow_case, ne);
  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);

  // make sure klass is fully initialized
  __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
  __ cmp(Rtemp, InstanceKlass::fully_initialized);
  __ b(slow_case, ne);

  // get instance_size in InstanceKlass (scaled to a count of bytes)
  __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));

  // test to see if it has a finalizer or is malformed in some way
  // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
  __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //  Else If inline contiguous allocations are enabled:
  //    Try to allocate in eden.
  //    If fails due to heap end, go to slow path.
  //
  //  If TLAB is enabled OR inline contiguous is enabled:
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to slow path.
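
  // The TLAB fast path below is plain bump-pointer allocation; in C-like
  // pseudocode (a sketch only, using the accessor names seen in this file):
  //
  //   obj = thread->tlab_top;
  //   new_top = obj + size_in_bytes;
  //   if (new_top > thread->tlab_end) goto slow_case;
  //   thread->tlab_top = new_top;
  //
  // The shared-eden path is the same computation, except that the final
  // store of the new top is a CAS (atomic_cas_bool / ldxr+stxr) retried in
  // a loop, since other threads allocate from the same region concurrently.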
  if (UseTLAB) {
    const Register Rtlab_top = R1_tmp;
    const Register Rtlab_end = R2_tmp;
    assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);

    __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
    __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
    __ add(Rtlab_top, Robj, Rsize);
    __ cmp(Rtlab_top, Rtlab_end);
    __ b(slow_case, hi);
    __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ b(initialize_header);
    } else {
      // initialize both the header and fields
      __ b(initialize_object);
    }
  } else {
    // Allocation in the shared Eden, if allowed.
    if (allow_shared_alloc) {
      const Register Rheap_top_addr = R2_tmp;
      const Register Rheap_top = R5_tmp;
      const Register Rheap_end = Rtemp;
      assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);

      // heap_end is (re)loaded in the loop since it is also used as a scratch register in the CAS
      __ ldr_literal(Rheap_top_addr, Lheap_top_addr);

      Label retry;
      __ bind(retry);

#ifdef AARCH64
      __ ldxr(Robj, Rheap_top_addr);
#else
      __ ldr(Robj, Address(Rheap_top_addr));
#endif // AARCH64

      __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
      __ add(Rheap_top, Robj, Rsize);
      __ cmp(Rheap_top, Rheap_end);
      __ b(slow_case, hi);

      // Update heap top atomically.
      // If someone beats us on the allocation, try again, otherwise continue.
#ifdef AARCH64
      __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
      __ cbnz_w(Rtemp2, retry);
#else
      __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
      __ b(retry, ne);
#endif // AARCH64

      __ incr_allocated_bytes(Rsize, Rtemp);
    }
  }

  if (UseTLAB || allow_shared_alloc) {
    const Register Rzero0 = R1_tmp;
    const Register Rzero1 = R2_tmp;
    const Register Rzero_end = R5_tmp;
    const Register Rzero_cur = Rtemp;
    assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);

    // The object is initialized before the header. If the object size is
    // zero, go directly to the header initialization.
    __ bind(initialize_object);
    __ subs(Rsize, Rsize, sizeof(oopDesc));
    __ add(Rzero_cur, Robj, sizeof(oopDesc));
    __ b(initialize_header, eq);

#ifdef ASSERT
    // make sure Rsize is a multiple of 8
    Label L;
    __ tst(Rsize, 0x07);
    __ b(L, eq);
    __ stop("object size is not multiple of 8 - adjust this code");
    __ bind(L);
#endif

#ifdef AARCH64
    {
      Label loop;
      // Step back by 1 word if object size is not a multiple of 2*wordSize.
      assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word");
      __ andr(Rtemp2, Rsize, (uintx)wordSize);
      __ sub(Rzero_cur, Rzero_cur, Rtemp2);

      // Zero by 2 words per iteration.
      __ bind(loop);
      __ subs(Rsize, Rsize, 2*wordSize);
      __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed));
      __ b(loop, gt);
    }
#else
    __ mov(Rzero0, 0);
    __ mov(Rzero1, 0);
    __ add(Rzero_end, Rzero_cur, Rsize);

    // initialize remaining object fields: Rsize was a multiple of 8
    { Label loop;
      // loop is unrolled 2 times
      __ bind(loop);
      // #1
      __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
      __ cmp(Rzero_cur, Rzero_end);
      // #2
      __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
      __ cmp(Rzero_cur, Rzero_end, ne);
      __ b(loop, ne);
    }
#endif // AARCH64
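
    // Worked example for the arm32 zeroing loop above (illustrative numbers):
    // each stmia clears one 8-byte register pair, so for a 40-byte instance
    // with an 8-byte oopDesc header, Rsize = 32 and the 2x-unrolled loop
    // performs four stmia stores over two iterations before both cmp checks
    // see Rzero_cur == Rzero_end.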

    // initialize object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
    } else {
      __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
    }
    // mark
    __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));

    // klass
#ifdef AARCH64
    __ store_klass_gap(Robj);
#endif // AARCH64
    __ store_klass(Rklass, Robj); // blows Rklass:
    Rklass = noreg;

    // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
    if (DTraceAllocProbes) {
      // Trigger dtrace event for fastpath
      Label Lcontinue;

      __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
      __ cbz(Rtemp, Lcontinue);

      __ push(atos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
      __ pop(atos);

      __ bind(Lcontinue);
    }

    __ b(done);
  } else {
    // jump over literals
    __ b(slow_case);
  }

  if (allow_shared_alloc) {
    __ bind_literal(Lheap_top_addr);
  }

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(Rcpool);
  __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
  __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  // continue
  __ bind(done);

  // StoreStore barrier required after complete initialization
  // (headers + content zeroing), before the object may escape.
  __ membar(MacroAssembler::StoreStore, R1_tmp);
}


void TemplateTable::newarray() {
  transition(itos, atos);
  __ ldrb(R1, at_bcp(1));
  __ mov(R2, R0_tos);
  call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
  // MacroAssembler::StoreStore useless (included in the runtime exit path)
}


void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(R2, 1);
  __ get_constant_pool(R1);
  __ mov(R3, R0_tos);
  call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
  // MacroAssembler::StoreStore useless (included in the runtime exit path)
}


void TemplateTable::arraylength() {
  transition(atos, itos);
  __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
  __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
}


void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, resolved, throw_exception;

  const Register Robj = R0_tos;
  const Register Rcpool = R2_tmp;
  const Register Rtags = R3_tmp;
  const Register Rindex = R4_tmp;
  const Register Rsuper = R3_tmp;
  const Register Rsub = R4_tmp;
  const Register Rsubtype_check_tmp1 = R1_tmp;
  const Register Rsubtype_check_tmp2 = LR_tmp;

  __ cbz(Robj, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(Rcpool, Rtags);
  __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);

  // See if bytecode has already been quicked
  __ add(Rtemp, Rtags, Rindex);
#ifdef AARCH64
  // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
  __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
  __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
#else
  __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
#endif // AARCH64

  __ cmp(Rtemp, JVM_CONSTANT_Class);

#ifndef AARCH64
  volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
#endif // !AARCH64

  __ b(quicked, eq);

  __ push(atos);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(Rsuper, Robj);
  __ pop_ptr(Robj);
  __ b(resolved);

  __ bind(throw_exception);
  // Come here on failure of subtype check
  __ profile_typecheck_failed(R1_tmp);
  __ mov(R2_ClassCastException_obj, Robj); // convention with generate_ClassCastException_handler()
  __ b(Interpreter::_throw_ClassCastException_entry);

  // Get superklass in Rsuper and subklass in Rsub
  __ bind(quicked);
  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);

  __ bind(resolved);
  __ load_klass(Rsub, Robj);

  // Generate subtype check. Blows both tmps and Rtemp.
  assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
  __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);

  // Come here on success

  // Collect counts on whether this check-cast sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(R1_tmp);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
}
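
// The quickening protocol used by checkcast/instanceof, in outline (a
// sketch, not normative): the constant-pool tag byte is read first; only if
// it already reads JVM_CONSTANT_Class may the resolved Klass* be loaded from
// the cp slot, which is why a LoadLoad barrier (ldarb on AArch64) separates
// the tag load from the klass load:
//
//   if (tags[index] == JVM_CONSTANT_Class)   // quicked
//     super = resolved_klass_at(cp, index);  // must not be reordered above
//   else
//     super = quicken_io_cc();               // resolve via the runtime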


void TemplateTable::instanceof() {
  // result = 0: obj == NULL or  obj is not an instanceof the specified klass
  // result = 1: obj != NULL and obj is an     instanceof the specified klass

  transition(atos, itos);
  Label done, is_null, not_subtype, quicked, resolved;

  const Register Robj = R0_tos;
  const Register Rcpool = R2_tmp;
  const Register Rtags = R3_tmp;
  const Register Rindex = R4_tmp;
  const Register Rsuper = R3_tmp;
  const Register Rsub = R4_tmp;
  const Register Rsubtype_check_tmp1 = R0_tmp;
  const Register Rsubtype_check_tmp2 = R1_tmp;

  __ cbz(Robj, is_null);

  __ load_klass(Rsub, Robj);

  // Get cpool & tags index
  __ get_cpool_and_tags(Rcpool, Rtags);
  __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);

  // See if bytecode has already been quicked
  __ add(Rtemp, Rtags, Rindex);
#ifdef AARCH64
  // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough
  __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes());
  __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier
#else
  __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
#endif // AARCH64
  __ cmp(Rtemp, JVM_CONSTANT_Class);

#ifndef AARCH64
  volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
#endif // !AARCH64

  __ b(quicked, eq);

  __ push(atos);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(Rsuper, Robj);
  __ pop_ptr(Robj);
  __ b(resolved);

  // Get superklass in Rsuper and subklass in Rsub
  __ bind(quicked);
  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);

  __ bind(resolved);
  __ load_klass(Rsub, Robj);

  // Generate subtype check. Blows both tmps and Rtemp.
  __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);

  // Come here on success
  __ mov(R0_tos, 1);
  __ b(done);

  __ bind(not_subtype);
  // Come here on failure
  __ profile_typecheck_failed(R1_tmp);
  __ mov(R0_tos, 0);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ b(done);
    __ bind(is_null);
    __ profile_null_seen(R1_tmp);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
}


//----------------------------------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping..
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  // get the unpatched byte code
  __ mov(R1, Rmethod);
  __ mov(R2, Rbcp);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
#ifdef AARCH64
  __ sxtw(Rtmp_save0, R0);
#else
  __ mov(Rtmp_save0, R0);
#endif // AARCH64

  // post the breakpoint event
  __ mov(R1, Rmethod);
  __ mov(R2, Rbcp);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);

  // complete the execution of original bytecode
  __ mov(R3_bytecode, Rtmp_save0);
  __ dispatch_only_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ mov(Rexception_obj, R0_tos);
  __ null_check(Rexception_obj, Rtemp);
  __ b(Interpreter::throw_exception_entry());
}


//----------------------------------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines; which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- Rstack_top = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved FP     ] <--- FP

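// Allocating a fresh monitor entry amounts to growing the monitor block
// downwards by one entry and sliding the expression stack below it (a sketch
// of the arithmetic used in monitorenter):
//
//   entry_size     = frame::interpreter_frame_monitor_size() * wordSize;
//   Rstack_top    -= entry_size;   // make room below the expression stack
//   new_block_top  = old_block_top - entry_size;
//   // copy each expression-stack word down by entry_size; the freed slot
//   // [new_block_top, old_block_top) then becomes the new monitor entry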

void TemplateTable::monitorenter() {
  transition(atos, vtos);

  const Register Robj = R0_tos;
  const Register Rentry = R1_tmp;

  // check for NULL object
  __ null_check(Robj, Rtemp);

  const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
  assert(entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
  Label allocate_monitor, allocated;

  // initialize entry pointer
  __ mov(Rentry, 0); // points to free slot or NULL

  // find a free slot in the monitor block (result in Rentry)
  { Label loop, exit;
    const Register Rcur = R2_tmp;
    const Register Rcur_obj = Rtemp;
    const Register Rbottom = R3_tmp;
    assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);

    __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                 // points to current entry, starting with top-most entry
    __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
                                 // points to word before bottom of monitor block

    __ cmp(Rcur, Rbottom);       // check if there are no monitors
#ifndef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                 // prefetch monitor's object for the first iteration
#endif // !AARCH64
    __ b(allocate_monitor, eq);  // there are no monitors, skip searching

    __ bind(loop);
#ifdef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
#endif // AARCH64
    __ cmp(Rcur_obj, 0);         // check if current entry is used
    __ mov(Rentry, Rcur, eq);    // if not used then remember entry

    __ cmp(Rcur_obj, Robj);      // check if current entry is for same object
    __ b(exit, eq);              // if same object then stop searching

    __ add(Rcur, Rcur, entry_size); // otherwise advance to next entry

    __ cmp(Rcur, Rbottom);       // check if bottom reached
#ifndef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                 // prefetch monitor's object for the next iteration
#endif // !AARCH64
    __ b(loop, ne);              // if not at bottom then check this entry
    __ bind(exit);
  }

  __ cbnz(Rentry, allocated);    // check if a slot has been found; if found, continue with that one

  __ bind(allocate_monitor);

  // allocate one if there's no free slot
  { Label loop;
    assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);

    // 1. compute new pointers

#ifdef AARCH64
    __ check_extended_sp(Rtemp);
    __ sub(SP, SP, entry_size);  // adjust extended SP
    __ mov(Rtemp, SP);
    __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
#endif // AARCH64

    __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                 // old monitor block top / expression stack bottom

    __ sub(Rstack_top, Rstack_top, entry_size); // move expression stack top
    __ check_stack_top_on_expansion();

    __ sub(Rentry, Rentry, entry_size); // move expression stack bottom

    __ mov(R2_tmp, Rstack_top);  // set start value for copy loop

    __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                 // set new monitor block top

    // 2. move expression stack contents

    __ cmp(R2_tmp, Rentry);      // check if expression stack is empty
#ifndef AARCH64
    __ ldr(Rtemp, Address(R2_tmp, entry_size), ne); // load expression stack word from old location
#endif // !AARCH64
    __ b(allocated, eq);

    __ bind(loop);
#ifdef AARCH64
    __ ldr(Rtemp, Address(R2_tmp, entry_size)); // load expression stack word from old location
#endif // AARCH64
    __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
                                 // and advance to next word
    __ cmp(R2_tmp, Rentry);      // check if bottom reached
#ifndef AARCH64
    __ ldr(Rtemp, Address(R2_tmp, entry_size), ne); // load expression stack word from old location
#endif // !AARCH64
    __ b(loop, ne);              // if not at bottom then copy next word
  }

  // call run-time routine

  // Rentry: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ add(Rbcp, Rbcp, 1);

  __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes())); // store object
  __ lock_object(Rentry);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp(); // in case of exception
  __ arm_stack_overflow_check(0, Rtemp);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);

  const Register Robj = R0_tos;
  const Register Rcur = R1_tmp;
  const Register Rbottom = R2_tmp;
  const Register Rcur_obj = Rtemp;

  // check for NULL object
  __ null_check(Robj, Rtemp);

  const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
  Label found, throw_exception;

  // find matching slot
  { Label loop;
    assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);

    __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                 // points to current entry, starting with top-most entry
    __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
                                 // points to word before bottom of monitor block

    __ cmp(Rcur, Rbottom);       // check if bottom reached
#ifndef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                 // prefetch monitor's object for the first iteration
#endif // !AARCH64
    __ b(throw_exception, eq);   // throw exception if there are no monitors

    __ bind(loop);
#ifdef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
#endif // AARCH64
    // check if current entry is for same object
    __ cmp(Rcur_obj, Robj);
    __ b(found, eq);             // if same object then stop searching
    __ add(Rcur, Rcur, entry_size); // otherwise advance to next entry
    __ cmp(Rcur, Rbottom);       // check if bottom reached
#ifndef AARCH64
    __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
#endif // !AARCH64
    __ b(loop, ne);              // if not at bottom then check this entry
  }

  // error handling. Unlocking was not block-structured
  __ bind(throw_exception);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  // Rcur: points to monitor entry
  __ bind(found);
  __ push_ptr(Robj);             // make sure object is on stack (contract with oopMaps)
  __ unlock_object(Rcur);
  __ pop_ptr(Robj);              // discard object
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldrb(R3_bytecode, at_bcp(1));

  InlinedAddress Ltable((address)Interpreter::_wentry_point);
  __ ldr_literal(Rtemp, Ltable);
  __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ bind_literal(Ltable);
}
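
// wide() above is a one-level table dispatch; in effect (a sketch only):
//
//   goto Interpreter::_wentry_point[*(bcp + 1)];
//
// i.e. the byte following the wide prefix selects the wide variant of the
// bytecode from a word-indexed table of entry points.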


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ ldrb(Rtmp_save0, at_bcp(3)); // get number of dimensions

  // last dim is on top of stack; we want address of first one:
  //   first_addr = last_addr + ndims * stackElementSize - 1*wordSize
  // the trailing wordSize adjustment points at the first dimension itself.
  __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
  __ sub(R1, Rtemp, wordSize);
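
  // Worked example (illustrative, assuming a 4-byte stack element on arm32):
  // for ndims = 3, R1 = Rstack_top + 3*4 - 4 = Rstack_top + 8, i.e. the slot
  // holding the first (outermost) dimension.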

  call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
  __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
  // MacroAssembler::StoreStore useless (included in the runtime exit path)
}