1 /*
2 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "gc/shared/barrierSetAssembler.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "interpreter/interpreterRuntime.hpp"
31 #include "interpreter/interp_masm.hpp"
32 #include "interpreter/templateTable.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/objArrayKlass.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/methodHandles.hpp"
37 #include "runtime/frame.inline.hpp"
38 #include "runtime/safepointMechanism.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "runtime/synchronizer.hpp"
42
43 #ifdef PRODUCT
44 #define __ _masm->
45 #define BLOCK_COMMENT(str)
46 #define BIND(label) __ bind(label);
47 #else
48 #define __ (PRODUCT_ONLY(false&&)Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
49 #define BLOCK_COMMENT(str) __ block_comment(str)
50 #define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")
51 #endif
52
53 // The assumed minimum size of a BranchTableBlock.
54 // The actual size of each block heavily depends on the CPU capabilities and,
55 // of course, on the logic implemented in each block.
56 #ifdef ASSERT
57 #define BTB_MINSIZE 256
58 #else
59 #define BTB_MINSIZE 64
60 #endif
61
62 #ifdef ASSERT
63 // Macro to open a BranchTableBlock (a piece of code that is branched to by a calculated branch).
64 #define BTB_BEGIN(lbl, alignment, name) \
65 __ align_address(alignment); \
66 __ bind(lbl); \
67 { unsigned int b_off = __ offset(); \
68 uintptr_t b_addr = (uintptr_t)__ pc(); \
69 __ z_larl(Z_R0, (int64_t)0); /* Check current address alignment. */ \
70 __ z_slgr(Z_R0, br_tab); /* Current Address must be equal */ \
71 __ z_slgr(Z_R0, flags); /* to calculated branch target. */ \
72 __ z_brc(Assembler::bcondLogZero, 3); /* skip trap if ok. */ \
73 __ z_illtrap(0x55); \
74 guarantee(b_addr%alignment == 0, "bad alignment at begin of block" name);
75
76 // Macro to close a BranchTableBlock (a piece of code that is branched to by a calculated branch).
77 #define BTB_END(lbl, alignment, name) \
78 uintptr_t e_addr = (uintptr_t)__ pc(); \
79 unsigned int e_off = __ offset(); \
80 unsigned int len = e_off-b_off; \
81 if (len > alignment) { \
82 tty->print_cr("%4d of %4d @ " INTPTR_FORMAT ": Block len for %s", \
83 len, alignment, e_addr-len, name); \
84 guarantee(len <= alignment, "block too large"); \
85 } \
86 guarantee(len == e_addr-b_addr, "block len mismatch"); \
87 }
88 #else
89 // Macro to open a BranchTableBlock (a piece of code that is branched to by a calculated branch).
90 #define BTB_BEGIN(lbl, alignment, name) \
91 __ align_address(alignment); \
92 __ bind(lbl); \
93 { unsigned int b_off = __ offset(); \
94 uintptr_t b_addr = (uintptr_t)__ pc(); \
95 guarantee(b_addr%alignment == 0, "bad alignment at begin of block" name);
96
97 // Macro to close a BranchTableBlock (a piece of code that is branched to by a calculated branch).
98 #define BTB_END(lbl, alignment, name) \
99 uintptr_t e_addr = (uintptr_t)__ pc(); \
100 unsigned int e_off = __ offset(); \
101 unsigned int len = e_off-b_off; \
102 if (len > alignment) { \
103 tty->print_cr("%4d of %4d @ " INTPTR_FORMAT ": Block len for %s", \
104 len, alignment, e_addr-len, name); \
105 guarantee(len <= alignment, "block too large"); \
106 } \
107 guarantee(len == e_addr-b_addr, "block len mismatch"); \
108 }
109 #endif // ASSERT
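// Rough usage sketch of the macros above: each target of a calculated branch
// is emitted as
//   BTB_BEGIN(lbl, alignment, "name");
//   ... at most 'alignment' bytes of handler code ...
//   BTB_END(lbl, alignment, "name");
// BTB_BEGIN aligns and binds the label; the ASSERT variant additionally checks
// at run time that the current pc equals br_tab + flags, i.e. that the
// computed branch really landed at the start of this block. BTB_END checks at
// assembly time that the block did not outgrow its slot.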
110
111 // Platform-dependent initialization.
112
113 void TemplateTable::pd_initialize() {
114 // No specific initialization.
115 }
116
117 // Address computation: local variables
118
119 static inline Address iaddress(int n) {
120 return Address(Z_locals, Interpreter::local_offset_in_bytes(n));
121 }
122
123 static inline Address laddress(int n) {
124 return iaddress(n + 1);
125 }
126
127 static inline Address faddress(int n) {
128 return iaddress(n);
129 }
130
131 static inline Address daddress(int n) {
132 return laddress(n);
133 }
134
135 static inline Address aaddress(int n) {
136 return iaddress(n);
137 }
138
139 // Pass NULL if no shift instruction should be emitted.
140 static inline Address iaddress(InterpreterMacroAssembler *masm, Register r) {
141 if (masm) {
142 masm->z_sllg(r, r, LogBytesPerWord); // index2bytes
143 }
144 return Address(Z_locals, r, Interpreter::local_offset_in_bytes(0));
145 }
146
147 // Pass NULL if no shift instruction should be emitted.
148 static inline Address laddress(InterpreterMacroAssembler *masm, Register r) {
149 if (masm) {
150 masm->z_sllg(r, r, LogBytesPerWord); // index2bytes
151 }
152 return Address(Z_locals, r, Interpreter::local_offset_in_bytes(1) );
153 }
154
155 static inline Address faddress(InterpreterMacroAssembler *masm, Register r) {
156 return iaddress(masm, r);
157 }
158
159 static inline Address daddress(InterpreterMacroAssembler *masm, Register r) {
160 return laddress(masm, r);
161 }
162
163 static inline Address aaddress(InterpreterMacroAssembler *masm, Register r) {
164 return iaddress(masm, r);
165 }
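// Illustration of the helpers above: for a register index r, the index is
// first scaled to a byte offset (r << LogBytesPerWord) and then used as an
// index register relative to Z_locals. Category-2 values (long/double) occupy
// a slot pair and are addressed via the second slot, hence
// laddress(n) == iaddress(n + 1).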
166
167 // At top of Java expression stack which may be different from esp(). It
168 // isn't for category 1 objects.
169 static inline Address at_tos(int slot = 0) {
170 return Address(Z_esp, Interpreter::expr_offset_in_bytes(slot));
171 }
172
173 // Condition conversion
174 static Assembler::branch_condition j_not(TemplateTable::Condition cc) {
175 switch (cc) {
176 case TemplateTable::equal :
177 return Assembler::bcondNotEqual;
178 case TemplateTable::not_equal :
179 return Assembler::bcondEqual;
180 case TemplateTable::less :
181 return Assembler::bcondNotLow;
182 case TemplateTable::less_equal :
183 return Assembler::bcondHigh;
184 case TemplateTable::greater :
185 return Assembler::bcondNotHigh;
186 case TemplateTable::greater_equal:
187 return Assembler::bcondLow;
188 }
189 ShouldNotReachHere();
190 return Assembler::bcondZero;
191 }
192
193 // Do an oop store like *(base + offset) = val
194 // offset can be a register or a constant.
195 static void do_oop_store(InterpreterMacroAssembler* _masm,
196 const Address& addr,
197 Register val, // Noreg means always null.
198 Register tmp1,
199 Register tmp2,
200 Register tmp3,
201 DecoratorSet decorators) {
202 assert_different_registers(tmp1, tmp2, tmp3, val, addr.base());
203 __ store_heap_oop(val, addr, tmp1, tmp2, tmp3, decorators);
204 }
205
206 static void do_oop_load(InterpreterMacroAssembler* _masm,
207 const Address& addr,
208 Register dst,
209 Register tmp1,
210 Register tmp2,
211 DecoratorSet decorators) {
212 assert_different_registers(addr.base(), tmp1, tmp2);
213 assert_different_registers(dst, tmp1, tmp2);
214 __ load_heap_oop(dst, addr, tmp1, tmp2, decorators);
215 }
216
217 Address TemplateTable::at_bcp(int offset) {
218 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
219 return Address(Z_bcp, offset);
220 }
221
222 void TemplateTable::patch_bytecode(Bytecodes::Code bc,
223 Register bc_reg,
224 Register temp_reg,
225 bool load_bc_into_bc_reg, // = true
226 int byte_no) {
227 if (!RewriteBytecodes) { return; }
228
229 NearLabel L_patch_done;
230 BLOCK_COMMENT("patch_bytecode {");
231
232 switch (bc) {
233 case Bytecodes::_fast_aputfield:
234 case Bytecodes::_fast_bputfield:
235 case Bytecodes::_fast_zputfield:
236 case Bytecodes::_fast_cputfield:
237 case Bytecodes::_fast_dputfield:
238 case Bytecodes::_fast_fputfield:
239 case Bytecodes::_fast_iputfield:
240 case Bytecodes::_fast_lputfield:
241 case Bytecodes::_fast_sputfield:
242 {
243 // We skip bytecode quickening for putfield instructions when
244 // the put_code written to the constant pool cache is zero.
245 // This is required so that every execution of this instruction
246 // calls out to InterpreterRuntime::resolve_get_put to do
247 // additional, required work.
248 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
249 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
250 __ get_cache_and_index_and_bytecode_at_bcp(Z_R1_scratch, bc_reg,
251 temp_reg, byte_no, 1);
252 __ load_const_optimized(bc_reg, bc);
253 __ compareU32_and_branch(temp_reg, (intptr_t)0,
254 Assembler::bcondZero, L_patch_done);
255 }
256 break;
257 default:
258 assert(byte_no == -1, "sanity");
259 // The pair bytecodes have already done the load.
260 if (load_bc_into_bc_reg) {
261 __ load_const_optimized(bc_reg, bc);
262 }
263 break;
264 }
265
266 if (JvmtiExport::can_post_breakpoint()) {
267
268 Label L_fast_patch;
269
270 // If a breakpoint is present we can't rewrite the stream directly.
271 __ z_cli(at_bcp(0), Bytecodes::_breakpoint);
272 __ z_brne(L_fast_patch);
273 __ get_method(temp_reg);
274 // Let breakpoint table handling rewrite to quicker bytecode.
275 __ call_VM_static(noreg,
276 CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at),
277 temp_reg, Z_R13, bc_reg);
278 __ z_bru(L_patch_done);
279
280 __ bind(L_fast_patch);
281 }
282
283 #ifdef ASSERT
284 NearLabel L_okay;
285
286 // We load into 64 bits, since this works on any CPU.
287 __ z_llgc(temp_reg, at_bcp(0));
288 __ compareU32_and_branch(temp_reg, Bytecodes::java_code(bc),
289 Assembler::bcondEqual, L_okay );
290 __ compareU32_and_branch(temp_reg, bc_reg, Assembler::bcondEqual, L_okay);
291 __ stop_static("patching the wrong bytecode");
292 __ bind(L_okay);
293 #endif
294
295 // Patch bytecode.
296 __ z_stc(bc_reg, at_bcp(0));
297
298 __ bind(L_patch_done);
299 BLOCK_COMMENT("} patch_bytecode");
300 }
301
302 // Individual instructions
303
304 void TemplateTable::nop() {
305 transition(vtos, vtos);
306 }
307
308 void TemplateTable::shouldnotreachhere() {
309 transition(vtos, vtos);
310 __ stop("shouldnotreachhere bytecode");
311 }
312
313 void TemplateTable::aconst_null() {
314 transition(vtos, atos);
315 __ clear_reg(Z_tos, true, false);
316 }
317
318 void TemplateTable::iconst(int value) {
319 transition(vtos, itos);
320 // Zero extension of the iconst makes zero extension at runtime obsolete.
321 __ load_const_optimized(Z_tos, ((unsigned long)(unsigned int)value));
322 }
323
324 void TemplateTable::lconst(int value) {
325 transition(vtos, ltos);
326 __ load_const_optimized(Z_tos, value);
327 }
328
329 // No pc-relative load/store for floats.
330 void TemplateTable::fconst(int value) {
331 transition(vtos, ftos);
332 static float one = 1.0f, two = 2.0f;
333
334 switch (value) {
335 case 0:
336 __ z_lzer(Z_ftos);
337 return;
338 case 1:
339 __ load_absolute_address(Z_R1_scratch, (address) &one);
340 __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch), false);
341 return;
342 case 2:
343 __ load_absolute_address(Z_R1_scratch, (address) &two);
344 __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch), false);
345 return;
346 default:
347 ShouldNotReachHere();
348 return;
349 }
350 }
351
352 void TemplateTable::dconst(int value) {
353 transition(vtos, dtos);
354 static double one = 1.0;
355
356 switch (value) {
357 case 0:
358 __ z_lzdr(Z_ftos);
359 return;
360 case 1:
361 __ load_absolute_address(Z_R1_scratch, (address) &one);
362 __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch));
363 return;
364 default:
365 ShouldNotReachHere();
366 return;
367 }
368 }
369
370 void TemplateTable::bipush() {
371 transition(vtos, itos);
372 __ z_lb(Z_tos, at_bcp(1));
373 }
374
375 void TemplateTable::sipush() {
376 transition(vtos, itos);
377 __ get_2_byte_integer_at_bcp(Z_tos, 1, InterpreterMacroAssembler::Signed);
378 }
379
380
381 void TemplateTable::ldc(bool wide) {
382 transition(vtos, vtos);
383 Label call_ldc, notFloat, notClass, notInt, Done;
384 const Register RcpIndex = Z_tmp_1;
385 const Register Rtags = Z_ARG2;
386
387 if (wide) {
388 __ get_2_byte_integer_at_bcp(RcpIndex, 1, InterpreterMacroAssembler::Unsigned);
389 } else {
390 __ z_llgc(RcpIndex, at_bcp(1));
391 }
392
393 __ get_cpool_and_tags(Z_tmp_2, Rtags);
394
395 const int base_offset = ConstantPool::header_size() * wordSize;
396 const int tags_offset = Array<u1>::base_offset_in_bytes();
397 const Register Raddr_type = Rtags;
398
399 // Get address of type.
400 __ add2reg_with_index(Raddr_type, tags_offset, RcpIndex, Rtags);
401
402 __ z_cli(0, Raddr_type, JVM_CONSTANT_UnresolvedClass);
403 __ z_bre(call_ldc); // Unresolved class - get the resolved class.
404
405 __ z_cli(0, Raddr_type, JVM_CONSTANT_UnresolvedClassInError);
406 __ z_bre(call_ldc); // Unresolved class in error state - call into runtime
407 // to throw the error from the first resolution attempt.
408
409 __ z_cli(0, Raddr_type, JVM_CONSTANT_Class);
410 __ z_brne(notClass); // Resolved class - need to call vm to get java
411 // mirror of the class.
412
413 // We deal with a class. Call the VM to do the appropriate work.
414 __ bind(call_ldc);
415 __ load_const_optimized(Z_ARG2, wide);
416 call_VM(Z_RET, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), Z_ARG2);
417 __ push_ptr(Z_RET);
418 __ z_bru(Done);
419
420 // Not a class.
421 __ bind(notClass);
422 Register RcpOffset = RcpIndex;
423 __ z_sllg(RcpOffset, RcpIndex, LogBytesPerWord); // Convert index to offset.
424 __ z_cli(0, Raddr_type, JVM_CONSTANT_Float);
425 __ z_brne(notFloat);
426
427 // ftos
428 __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, RcpOffset, base_offset), false);
429 __ push_f();
430 __ z_bru(Done);
431
432 __ bind(notFloat);
433 __ z_cli(0, Raddr_type, JVM_CONSTANT_Integer);
434 __ z_brne(notInt);
435
436 // itos
437 __ mem2reg_opt(Z_tos, Address(Z_tmp_2, RcpOffset, base_offset), false);
438 __ push_i(Z_tos);
439 __ z_bru(Done);
440
441 // assume the tag is for condy; if not, the VM runtime will tell us
442 __ bind(notInt);
443 condy_helper(Done);
444
445 __ bind(Done);
446 }
447
448 // Fast path for caching oop constants.
449 // %%% We should use this to handle Class and String constants also.
450 // %%% It will simplify the ldc/primitive path considerably.
451 void TemplateTable::fast_aldc(bool wide) {
452 transition(vtos, atos);
453
454 const Register index = Z_tmp_2;
455 int index_size = wide ? sizeof(u2) : sizeof(u1);
456 Label L_do_resolve, L_resolved;
457
458 // We are resolved if the resolved reference cache entry contains a
459 // non-null object (CallSite, etc.).
460 __ get_cache_index_at_bcp(index, 1, index_size); // Load index.
461 __ load_resolved_reference_at_index(Z_tos, index);
462 __ z_ltgr(Z_tos, Z_tos);
463 __ z_bre(L_do_resolve);
464
465 // Convert null sentinel to NULL.
466 __ load_const_optimized(Z_R1_scratch, (intptr_t)Universe::the_null_sentinel_addr());
467 __ z_cg(Z_tos, Address(Z_R1_scratch));
468 __ z_brne(L_resolved);
469 __ clear_reg(Z_tos);
470 __ z_bru(L_resolved);
471
472 __ bind(L_do_resolve);
473 // First time invocation - must resolve first.
474 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
475 __ load_const_optimized(Z_ARG1, (int)bytecode());
476 __ call_VM(Z_tos, entry, Z_ARG1);
477
478 __ bind(L_resolved);
479 __ verify_oop(Z_tos);
480 }
481
482 void TemplateTable::ldc2_w() {
483 transition(vtos, vtos);
484 Label notDouble, notLong, Done;
485
486 // Z_tmp_1 = index of cp entry
487 __ get_2_byte_integer_at_bcp(Z_tmp_1, 1, InterpreterMacroAssembler::Unsigned);
488
489 __ get_cpool_and_tags(Z_tmp_2, Z_tos);
490
491 const int base_offset = ConstantPool::header_size() * wordSize;
492 const int tags_offset = Array<u1>::base_offset_in_bytes();
493
494 // Get address of type.
495 __ add2reg_with_index(Z_tos, tags_offset, Z_tos, Z_tmp_1);
496
497 // Index needed in both branches, so calculate here.
498 __ z_sllg(Z_tmp_1, Z_tmp_1, LogBytesPerWord); // index2bytes
499
500 // Check type.
501 __ z_cli(0, Z_tos, JVM_CONSTANT_Double);
502 __ z_brne(notDouble);
503 // dtos
504 __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, Z_tmp_1, base_offset));
505 __ push_d();
506 __ z_bru(Done);
507
508 __ bind(notDouble);
509 __ z_cli(0, Z_tos, JVM_CONSTANT_Long);
510 __ z_brne(notLong);
511 // ltos
512 __ mem2reg_opt(Z_tos, Address(Z_tmp_2, Z_tmp_1, base_offset));
513 __ push_l();
514 __ z_bru(Done);
515
516 __ bind(notLong);
517 condy_helper(Done);
518
519 __ bind(Done);
520 }
521
522 void TemplateTable::condy_helper(Label& Done) {
523 const Register obj = Z_tmp_1;
524 const Register off = Z_tmp_2;
525 const Register flags = Z_ARG1;
526 const Register rarg = Z_ARG2;
527 __ load_const_optimized(rarg, (int)bytecode());
528 call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
529 __ get_vm_result_2(flags);
530
531 // VMr = obj = base address to find primitive value to push
532 // VMr2 = flags = (tos, off) using format of CPCE::_flags
533 assert(ConstantPoolCacheEntry::field_index_mask == 0xffff, "or use other instructions");
534 __ z_llghr(off, flags);
535 const Address field(obj, off);
536
537 // What sort of thing are we loading?
538 __ z_srl(flags, ConstantPoolCacheEntry::tos_state_shift);
539 // Make sure we don't need to mask flags for tos_state after the above shift.
540 ConstantPoolCacheEntry::verify_tos_state_shift();
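// Illustration (follows from the asserts above): the low 16 bits of 'flags'
// hold the offset of the constant's value within 'obj' (field_index_mask is
// 0xffff), and the bits from tos_state_shift upwards select the type. For an
// int constant, flags >> tos_state_shift == itos and 'field' addresses the
// 4-byte value.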
541
542 switch (bytecode()) {
543 case Bytecodes::_ldc:
544 case Bytecodes::_ldc_w:
545 {
546 // tos in (itos, ftos, stos, btos, ctos, ztos)
547 Label notInt, notFloat, notShort, notByte, notChar, notBool;
548 __ z_cghi(flags, itos);
549 __ z_brne(notInt);
550 // itos
551 __ z_l(Z_tos, field);
552 __ push(itos);
553 __ z_bru(Done);
554
555 __ bind(notInt);
556 __ z_cghi(flags, ftos);
557 __ z_brne(notFloat);
558 // ftos
559 __ z_le(Z_ftos, field);
560 __ push(ftos);
561 __ z_bru(Done);
562
563 __ bind(notFloat);
564 __ z_cghi(flags, stos);
565 __ z_brne(notShort);
566 // stos
567 __ z_lh(Z_tos, field);
568 __ push(stos);
569 __ z_bru(Done);
570
571 __ bind(notShort);
572 __ z_cghi(flags, btos);
573 __ z_brne(notByte);
574 // btos
575 __ z_lb(Z_tos, field);
576 __ push(btos);
577 __ z_bru(Done);
578
579 __ bind(notByte);
580 __ z_cghi(flags, ctos);
581 __ z_brne(notChar);
582 // ctos
583 __ z_llh(Z_tos, field);
584 __ push(ctos);
585 __ z_bru(Done);
586
587 __ bind(notChar);
588 __ z_cghi(flags, ztos);
589 __ z_brne(notBool);
590 // ztos
591 __ z_lb(Z_tos, field);
592 __ push(ztos);
593 __ z_bru(Done);
594
595 __ bind(notBool);
596 break;
597 }
598
599 case Bytecodes::_ldc2_w:
600 {
601 Label notLong, notDouble;
602 __ z_cghi(flags, ltos);
603 __ z_brne(notLong);
604 // ltos
605 __ z_lg(Z_tos, field);
606 __ push(ltos);
607 __ z_bru(Done);
608
609 __ bind(notLong);
610 __ z_cghi(flags, dtos);
611 __ z_brne(notDouble);
612 // dtos
613 __ z_ld(Z_ftos, field);
614 __ push(dtos);
615 __ z_bru(Done);
616
617 __ bind(notDouble);
618 break;
619 }
620
621 default:
622 ShouldNotReachHere();
623 }
624
625 __ stop("bad ldc/condy");
626 }
627
628 void TemplateTable::locals_index(Register reg, int offset) {
629 __ z_llgc(reg, at_bcp(offset));
630 __ z_lcgr(reg);
631 }
632
633 void TemplateTable::iload() {
634 iload_internal();
635 }
636
637 void TemplateTable::nofast_iload() {
638 iload_internal(may_not_rewrite);
639 }
640
641 void TemplateTable::iload_internal(RewriteControl rc) {
642 transition(vtos, itos);
643
644 if (RewriteFrequentPairs && rc == may_rewrite) {
645 NearLabel rewrite, done;
646 const Register bc = Z_ARG4;
647
648 assert(Z_R1_scratch != bc, "register damaged");
649
650 // Get next byte.
651 __ z_llgc(Z_R1_scratch, at_bcp(Bytecodes::length_for (Bytecodes::_iload)));
652
653 // If _iload, wait to rewrite to iload2. We only want to rewrite the
654 // last two iloads in a pair. Comparing against fast_iload means that
655 // the next bytecode is neither an iload nor a caload, and therefore
656 // an iload pair.
657 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_iload,
658 Assembler::bcondEqual, done);
659
660 __ load_const_optimized(bc, Bytecodes::_fast_iload2);
661 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_iload,
662 Assembler::bcondEqual, rewrite);
663
664 // If _caload, rewrite to fast_icaload.
665 __ load_const_optimized(bc, Bytecodes::_fast_icaload);
666 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_caload,
667 Assembler::bcondEqual, rewrite);
668
669 // Rewrite so iload doesn't check again.
670 __ load_const_optimized(bc, Bytecodes::_fast_iload);
671
672 // rewrite
673 // bc: fast bytecode
674 __ bind(rewrite);
675 patch_bytecode(Bytecodes::_iload, bc, Z_R1_scratch, false);
676
677 __ bind(done);
678
679 }
680
681 // Get the local value into tos.
682 locals_index(Z_R1_scratch);
683 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
684 }
685
686 void TemplateTable::fast_iload2() {
687 transition(vtos, itos);
688
689 locals_index(Z_R1_scratch);
690 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
691 __ push_i(Z_tos);
692 locals_index(Z_R1_scratch, 3);
693 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
694 }
695
696 void TemplateTable::fast_iload() {
697 transition(vtos, itos);
698
699 locals_index(Z_R1_scratch);
700 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
701 }
702
703 void TemplateTable::lload() {
704 transition(vtos, ltos);
705
706 locals_index(Z_R1_scratch);
707 __ mem2reg_opt(Z_tos, laddress(_masm, Z_R1_scratch));
708 }
709
710 void TemplateTable::fload() {
711 transition(vtos, ftos);
712
713 locals_index(Z_R1_scratch);
714 __ mem2freg_opt(Z_ftos, faddress(_masm, Z_R1_scratch), false);
715 }
716
717 void TemplateTable::dload() {
718 transition(vtos, dtos);
719
720 locals_index(Z_R1_scratch);
721 __ mem2freg_opt(Z_ftos, daddress(_masm, Z_R1_scratch));
722 }
723
724 void TemplateTable::aload() {
725 transition(vtos, atos);
726
727 locals_index(Z_R1_scratch);
728 __ mem2reg_opt(Z_tos, aaddress(_masm, Z_R1_scratch));
729 }
730
731 void TemplateTable::locals_index_wide(Register reg) {
732 __ get_2_byte_integer_at_bcp(reg, 2, InterpreterMacroAssembler::Unsigned);
733 __ z_lcgr(reg);
734 }
735
736 void TemplateTable::wide_iload() {
737 transition(vtos, itos);
738
739 locals_index_wide(Z_tmp_1);
740 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_tmp_1), false);
741 }
742
743 void TemplateTable::wide_lload() {
744 transition(vtos, ltos);
745
746 locals_index_wide(Z_tmp_1);
747 __ mem2reg_opt(Z_tos, laddress(_masm, Z_tmp_1));
748 }
749
750 void TemplateTable::wide_fload() {
751 transition(vtos, ftos);
752
753 locals_index_wide(Z_tmp_1);
754 __ mem2freg_opt(Z_ftos, faddress(_masm, Z_tmp_1), false);
755 }
756
757 void TemplateTable::wide_dload() {
758 transition(vtos, dtos);
759
760 locals_index_wide(Z_tmp_1);
761 __ mem2freg_opt(Z_ftos, daddress(_masm, Z_tmp_1));
762 }
763
764 void TemplateTable::wide_aload() {
765 transition(vtos, atos);
766
767 locals_index_wide(Z_tmp_1);
768 __ mem2reg_opt(Z_tos, aaddress(_masm, Z_tmp_1));
769 }
770
771 void TemplateTable::index_check(Register array, Register index, unsigned int shift) {
772 assert_different_registers(Z_R1_scratch, array, index);
773
774 // Check array.
775 __ null_check(array, Z_R0_scratch, arrayOopDesc::length_offset_in_bytes());
776
777 // Sign extend index for use by indexed load.
778 __ z_lgfr(index, index);
779
780 // Check index.
781 Label index_ok;
782 __ z_cl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
783 __ z_brl(index_ok);
784 __ lgr_if_needed(Z_ARG3, index); // See generate_ArrayIndexOutOfBounds_handler().
785 // Pass the array to create more detailed exceptions.
786 __ lgr_if_needed(Z_ARG2, array); // See generate_ArrayIndexOutOfBounds_handler().
787 __ load_absolute_address(Z_R1_scratch,
788 Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
789 __ z_bcr(Assembler::bcondAlways, Z_R1_scratch);
790 __ bind(index_ok);
791
792 if (shift > 0)
793 __ z_sllg(index, index, shift);
794 }
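// Usage note: callers pass the log2 of the element size as 'shift' (e.g.
// iaload passes LogBytesPerInt), so on return 'index' already holds
// index * element_size and can be used directly as the index register of the
// element address. A shift of 0 (baload/bastore) leaves the index unscaled.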
795
796 void TemplateTable::iaload() {
797 transition(itos, itos);
798
799 __ pop_ptr(Z_tmp_1); // array
800 // Index is in Z_tos.
801 Register index = Z_tos;
802 index_check(Z_tmp_1, index, LogBytesPerInt); // Kills Z_ARG3.
803 // Load the value.
804 __ mem2reg_opt(Z_tos,
805 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_INT)),
806 false);
807 }
808
809 void TemplateTable::laload() {
810 transition(itos, ltos);
811
812 __ pop_ptr(Z_tmp_2);
813 // Z_tos : index
814 // Z_tmp_2 : array
815 Register index = Z_tos;
816 index_check(Z_tmp_2, index, LogBytesPerLong);
817 __ mem2reg_opt(Z_tos,
818 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_LONG)));
819 }
820
821 void TemplateTable::faload() {
822 transition(itos, ftos);
823
824 __ pop_ptr(Z_tmp_2);
825 // Z_tos : index
826 // Z_tmp_2 : array
827 Register index = Z_tos;
828 index_check(Z_tmp_2, index, LogBytesPerInt);
829 __ mem2freg_opt(Z_ftos,
830 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
831 false);
832 }
833
834 void TemplateTable::daload() {
835 transition(itos, dtos);
836
837 __ pop_ptr(Z_tmp_2);
838 // Z_tos : index
839 // Z_tmp_2 : array
840 Register index = Z_tos;
841 index_check(Z_tmp_2, index, LogBytesPerLong);
842 __ mem2freg_opt(Z_ftos,
843 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
844 }
845
846 void TemplateTable::aaload() {
847 transition(itos, atos);
848
849 unsigned const int shift = LogBytesPerHeapOop;
850 __ pop_ptr(Z_tmp_1); // array
851 // Index is in Z_tos.
852 Register index = Z_tos;
853 index_check(Z_tmp_1, index, shift);
854 // Now load array element.
855 do_oop_load(_masm, Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), Z_tos,
856 Z_tmp_2, Z_tmp_3, IS_ARRAY);
857 __ verify_oop(Z_tos);
858 }
859
860 void TemplateTable::baload() {
861 transition(itos, itos);
862
863 __ pop_ptr(Z_tmp_1);
864 // Z_tos : index
865 // Z_tmp_1 : array
866 Register index = Z_tos;
867 index_check(Z_tmp_1, index, 0);
868 __ z_lb(Z_tos,
869 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
870 }
871
872 void TemplateTable::caload() {
873 transition(itos, itos);
874
875 __ pop_ptr(Z_tmp_2);
876 // Z_tos : index
877 // Z_tmp_2 : array
878 Register index = Z_tos;
879 index_check(Z_tmp_2, index, LogBytesPerShort);
880 // Load into 64 bits, works on all CPUs.
881 __ z_llgh(Z_tos,
882 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
883 }
884
885 // Iload followed by caload frequent pair.
886 void TemplateTable::fast_icaload() {
887 transition(vtos, itos);
888
889 // Load index out of locals.
890 locals_index(Z_R1_scratch);
891 __ mem2reg_opt(Z_ARG3, iaddress(_masm, Z_R1_scratch), false);
892 // Z_ARG3 : index
893 // Z_tmp_2 : array
894 __ pop_ptr(Z_tmp_2);
895 index_check(Z_tmp_2, Z_ARG3, LogBytesPerShort);
896 // Load into 64 bits, works on all CPUs.
897 __ z_llgh(Z_tos,
898 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
899 }
900
901 void TemplateTable::saload() {
902 transition(itos, itos);
903
904 __ pop_ptr(Z_tmp_2);
905 // Z_tos : index
906 // Z_tmp_2 : array
907 Register index = Z_tos;
908 index_check(Z_tmp_2, index, LogBytesPerShort);
909 __ z_lh(Z_tos,
910 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
911 }
912
913 void TemplateTable::iload(int n) {
914 transition(vtos, itos);
915 __ z_ly(Z_tos, iaddress(n));
916 }
917
918 void TemplateTable::lload(int n) {
919 transition(vtos, ltos);
920 __ z_lg(Z_tos, laddress(n));
921 }
922
923 void TemplateTable::fload(int n) {
924 transition(vtos, ftos);
925 __ mem2freg_opt(Z_ftos, faddress(n), false);
926 }
927
928 void TemplateTable::dload(int n) {
929 transition(vtos, dtos);
930 __ mem2freg_opt(Z_ftos, daddress(n));
931 }
932
933 void TemplateTable::aload(int n) {
934 transition(vtos, atos);
935 __ mem2reg_opt(Z_tos, aaddress(n));
936 }
937
938 void TemplateTable::aload_0() {
939 aload_0_internal();
940 }
941
942 void TemplateTable::nofast_aload_0() {
943 aload_0_internal(may_not_rewrite);
944 }
945
946 void TemplateTable::aload_0_internal(RewriteControl rc) {
947 transition(vtos, atos);
948
949 // According to bytecode histograms, the pairs:
950 //
951 // _aload_0, _fast_igetfield
952 // _aload_0, _fast_agetfield
953 // _aload_0, _fast_fgetfield
954 //
955 // occur frequently. If RewriteFrequentPairs is set, the (slow)
956 // _aload_0 bytecode checks if the next bytecode is either
957 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
958 // rewrites the current bytecode into a pair bytecode; otherwise it
959 // rewrites the current bytecode into _fast_aload_0 that doesn't do
960 // the pair check anymore.
961 //
962 // Note: If the next bytecode is _getfield, the rewrite must be
963 // delayed, otherwise we may miss an opportunity for a pair.
964 //
965 // Also rewrite frequent pairs
966 // aload_0, aload_1
967 // aload_0, iload_1
968 // These bytecodes with a small amount of code are most profitable
969 // to rewrite.
970 if (!(RewriteFrequentPairs && (rc == may_rewrite))) {
971 aload(0);
972 return;
973 }
974
975 NearLabel rewrite, done;
976 const Register bc = Z_ARG4;
977
978 assert(Z_R1_scratch != bc, "register damaged");
979 // Get next byte.
980 __ z_llgc(Z_R1_scratch, at_bcp(Bytecodes::length_for (Bytecodes::_aload_0)));
981
982 // Do actual aload_0.
983 aload(0);
984
985 // If _getfield then wait with rewrite.
986 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_getfield,
987 Assembler::bcondEqual, done);
988
989 // If _igetfield then rewrite to _fast_iaccess_0.
990 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0)
991 == Bytecodes::_aload_0, "fix bytecode definition");
992
993 __ load_const_optimized(bc, Bytecodes::_fast_iaccess_0);
994 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_igetfield,
995 Assembler::bcondEqual, rewrite);
996
997 // If _agetfield then rewrite to _fast_aaccess_0.
998 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0)
999 == Bytecodes::_aload_0, "fix bytecode definition");
1000
1001 __ load_const_optimized(bc, Bytecodes::_fast_aaccess_0);
1002 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_agetfield,
1003 Assembler::bcondEqual, rewrite);
1004
1005 // If _fgetfield then rewrite to _fast_faccess_0.
1006 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0)
1007 == Bytecodes::_aload_0, "fix bytecode definition");
1008
1009 __ load_const_optimized(bc, Bytecodes::_fast_faccess_0);
1010 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_fgetfield,
1011 Assembler::bcondEqual, rewrite);
1012
1013 // Else rewrite to _fast_aload_0.
1014 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0)
1015 == Bytecodes::_aload_0, "fix bytecode definition");
1016 __ load_const_optimized(bc, Bytecodes::_fast_aload_0);
1017
1018 // rewrite
1019 // bc: fast bytecode
1020 __ bind(rewrite);
1021
1022 patch_bytecode(Bytecodes::_aload_0, bc, Z_R1_scratch, false);
1023 // Reload local 0 because of VM call inside patch_bytecode().
1024 // this may trigger GC and thus change the oop.
1025 aload(0);
1026
1027 __ bind(done);
1028 }
1029
1030 void TemplateTable::istore() {
1031 transition(itos, vtos);
1032 locals_index(Z_R1_scratch);
1033 __ reg2mem_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
1034 }
1035
1036 void TemplateTable::lstore() {
1037 transition(ltos, vtos);
1038 locals_index(Z_R1_scratch);
1039 __ reg2mem_opt(Z_tos, laddress(_masm, Z_R1_scratch));
1040 }
1041
1042 void TemplateTable::fstore() {
1043 transition(ftos, vtos);
1044 locals_index(Z_R1_scratch);
1045 __ freg2mem_opt(Z_ftos, faddress(_masm, Z_R1_scratch));
1046 }
1047
1048 void TemplateTable::dstore() {
1049 transition(dtos, vtos);
1050 locals_index(Z_R1_scratch);
1051 __ freg2mem_opt(Z_ftos, daddress(_masm, Z_R1_scratch));
1052 }
1053
1054 void TemplateTable::astore() {
1055 transition(vtos, vtos);
1056 __ pop_ptr(Z_tos);
1057 locals_index(Z_R1_scratch);
1058 __ reg2mem_opt(Z_tos, aaddress(_masm, Z_R1_scratch));
1059 }
1060
1061 void TemplateTable::wide_istore() {
1062 transition(vtos, vtos);
1063 __ pop_i(Z_tos);
1064 locals_index_wide(Z_tmp_1);
1065 __ reg2mem_opt(Z_tos, iaddress(_masm, Z_tmp_1), false);
1066 }
1067
1068 void TemplateTable::wide_lstore() {
1069 transition(vtos, vtos);
1070 __ pop_l(Z_tos);
1071 locals_index_wide(Z_tmp_1);
1072 __ reg2mem_opt(Z_tos, laddress(_masm, Z_tmp_1));
1073 }
1074
1075 void TemplateTable::wide_fstore() {
1076 transition(vtos, vtos);
1077 __ pop_f(Z_ftos);
1078 locals_index_wide(Z_tmp_1);
1079 __ freg2mem_opt(Z_ftos, faddress(_masm, Z_tmp_1), false);
1080 }
1081
1082 void TemplateTable::wide_dstore() {
1083 transition(vtos, vtos);
1084 __ pop_d(Z_ftos);
1085 locals_index_wide(Z_tmp_1);
1086 __ freg2mem_opt(Z_ftos, daddress(_masm, Z_tmp_1));
1087 }
1088
1089 void TemplateTable::wide_astore() {
1090 transition(vtos, vtos);
1091 __ pop_ptr(Z_tos);
1092 locals_index_wide(Z_tmp_1);
1093 __ reg2mem_opt(Z_tos, aaddress(_masm, Z_tmp_1));
1094 }
1095
1096 void TemplateTable::iastore() {
1097 transition(itos, vtos);
1098
1099 Register index = Z_ARG3; // Index_check expects index in Z_ARG3.
1100 // Value is in Z_tos ...
1101 __ pop_i(index); // index
1102 __ pop_ptr(Z_tmp_1); // array
1103 index_check(Z_tmp_1, index, LogBytesPerInt);
1104 // ... and then move the value.
1105 __ reg2mem_opt(Z_tos,
1106 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_INT)),
1107 false);
1108 }
1109
1110 void TemplateTable::lastore() {
1111 transition(ltos, vtos);
1112
1113 __ pop_i(Z_ARG3);
1114 __ pop_ptr(Z_tmp_2);
1115 // Z_tos : value
1116 // Z_ARG3 : index
1117 // Z_tmp_2 : array
1118 index_check(Z_tmp_2, Z_ARG3, LogBytesPerLong); // Prefer index in Z_ARG3.
1119 __ reg2mem_opt(Z_tos,
1120 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_LONG)));
1121 }
1122
1123 void TemplateTable::fastore() {
1124 transition(ftos, vtos);
1125
1126 __ pop_i(Z_ARG3);
1127 __ pop_ptr(Z_tmp_2);
1128 // Z_ftos : value
1129 // Z_ARG3 : index
1130 // Z_tmp_2 : array
1131 index_check(Z_tmp_2, Z_ARG3, LogBytesPerInt); // Prefer index in Z_ARG3.
1132 __ freg2mem_opt(Z_ftos,
1133 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
1134 false);
1135 }
1136
1137 void TemplateTable::dastore() {
1138 transition(dtos, vtos);
1139
1140 __ pop_i(Z_ARG3);
1141 __ pop_ptr(Z_tmp_2);
1142 // Z_ftos : value
1143 // Z_ARG3 : index
1144 // Z_tmp_2 : array
1145 index_check(Z_tmp_2, Z_ARG3, LogBytesPerLong); // Prefer index in Z_ARG3.
1146 __ freg2mem_opt(Z_ftos,
1147 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
1148 }
1149
1150 void TemplateTable::aastore() {
1151 NearLabel is_null, ok_is_subtype, done;
1152 transition(vtos, vtos);
1153
1154 // stack: ..., array, index, value
1155
1156 Register Rvalue = Z_tos;
1157 Register Rarray = Z_ARG2;
1158 Register Rindex = Z_ARG3; // Convention for index_check().
1159
1160 __ load_ptr(0, Rvalue);
1161 __ z_l(Rindex, Address(Z_esp, Interpreter::expr_offset_in_bytes(1)));
1162 __ load_ptr(2, Rarray);
1163
1164 unsigned const int shift = LogBytesPerHeapOop;
1165 index_check(Rarray, Rindex, shift); // side effect: Rindex = Rindex << shift
1166 Register Rstore_addr = Rindex;
1167 // Address where the store goes to, i.e. &(Rarray[index])
1168 __ load_address(Rstore_addr, Address(Rarray, Rindex, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
1169
1170 // do array store check - check for NULL value first.
1171 __ compareU64_and_branch(Rvalue, (intptr_t)0, Assembler::bcondEqual, is_null);
1172
1173 Register Rsub_klass = Z_ARG4;
1174 Register Rsuper_klass = Z_ARG5;
1175 __ load_klass(Rsub_klass, Rvalue);
1176 // Load superklass.
1177 __ load_klass(Rsuper_klass, Rarray);
1178 __ z_lg(Rsuper_klass, Address(Rsuper_klass, ObjArrayKlass::element_klass_offset()));
1179
1180 // Generate a fast subtype check. Branch to ok_is_subtype if no failure.
1181 // Throw if failure.
1182 Register tmp1 = Z_tmp_1;
1183 Register tmp2 = Z_tmp_2;
1184 __ gen_subtype_check(Rsub_klass, Rsuper_klass, tmp1, tmp2, ok_is_subtype);
1185
1186 // Fall through on failure.
1187 // Object is in Rvalue == Z_tos.
1188 assert(Rvalue == Z_tos, "that's the expected location");
1189 __ load_absolute_address(tmp1, Interpreter::_throw_ArrayStoreException_entry);
1190 __ z_br(tmp1);
1191
1192 Register tmp3 = Rsub_klass;
1193
1194 // Have a NULL in Rvalue.
1195 __ bind(is_null);
1196 __ profile_null_seen(tmp1);
1197
1198 // Store a NULL.
1199 do_oop_store(_masm, Address(Rstore_addr, (intptr_t)0), noreg,
1200 tmp3, tmp2, tmp1, IS_ARRAY);
1201 __ z_bru(done);
1202
1203 // Come here on success.
1204 __ bind(ok_is_subtype);
1205
1206 // Now store using the appropriate barrier.
1207 do_oop_store(_masm, Address(Rstore_addr, (intptr_t)0), Rvalue,
1208 tmp3, tmp2, tmp1, IS_ARRAY | IS_NOT_NULL);
1209
1210 // Pop stack arguments.
1211 __ bind(done);
1212 __ add2reg(Z_esp, 3 * Interpreter::stackElementSize);
1213 }
1214
1215
1216 void TemplateTable::bastore() {
1217 transition(itos, vtos);
1218
1219 __ pop_i(Z_ARG3);
1220 __ pop_ptr(Z_tmp_2);
1221 // Z_tos : value
1222 // Z_ARG3 : index
1223 // Z_tmp_2 : array
1224
1225 // Need to check whether array is boolean or byte
1226 // since both types share the bastore bytecode.
1227 __ load_klass(Z_tmp_1, Z_tmp_2);
1228 __ z_llgf(Z_tmp_1, Address(Z_tmp_1, Klass::layout_helper_offset()));
1229 __ z_tmll(Z_tmp_1, Klass::layout_helper_boolean_diffbit());
1230 Label L_skip;
1231 __ z_bfalse(L_skip);
1232 // if it is a T_BOOLEAN array, mask the stored value to 0/1
1233 __ z_nilf(Z_tos, 0x1);
1234 __ bind(L_skip);
1235
1236 // No index shift necessary - pass 0.
1237 index_check(Z_tmp_2, Z_ARG3, 0); // Prefer index in Z_ARG3.
1238 __ z_stc(Z_tos,
1239 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
1240 }
1241
1242 void TemplateTable::castore() {
1243 transition(itos, vtos);
1244
1245 __ pop_i(Z_ARG3);
1246 __ pop_ptr(Z_tmp_2);
1247 // Z_tos : value
1248 // Z_ARG3 : index
1249 // Z_tmp_2 : array
1250 Register index = Z_ARG3; // prefer index in Z_ARG3
1251 index_check(Z_tmp_2, index, LogBytesPerShort);
1252 __ z_sth(Z_tos,
1253 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
1254 }
1255
1256 void TemplateTable::sastore() {
1257 castore();
1258 }
1259
1260 void TemplateTable::istore(int n) {
1261 transition(itos, vtos);
1262 __ reg2mem_opt(Z_tos, iaddress(n), false);
1263 }
1264
1265 void TemplateTable::lstore(int n) {
1266 transition(ltos, vtos);
1267 __ reg2mem_opt(Z_tos, laddress(n));
1268 }
1269
1270 void TemplateTable::fstore(int n) {
1271 transition(ftos, vtos);
1272 __ freg2mem_opt(Z_ftos, faddress(n), false);
1273 }
1274
1275 void TemplateTable::dstore(int n) {
1276 transition(dtos, vtos);
1277 __ freg2mem_opt(Z_ftos, daddress(n));
1278 }
1279
1280 void TemplateTable::astore(int n) {
1281 transition(vtos, vtos);
1282 __ pop_ptr(Z_tos);
1283 __ reg2mem_opt(Z_tos, aaddress(n));
1284 }
1285
1286 void TemplateTable::pop() {
1287 transition(vtos, vtos);
1288 __ add2reg(Z_esp, Interpreter::stackElementSize);
1289 }
1290
1291 void TemplateTable::pop2() {
1292 transition(vtos, vtos);
1293 __ add2reg(Z_esp, 2 * Interpreter::stackElementSize);
1294 }
1295
1296 void TemplateTable::dup() {
1297 transition(vtos, vtos);
1298 __ load_ptr(0, Z_tos);
1299 __ push_ptr(Z_tos);
1300 // stack: ..., a, a
1301 }
1302
1303 void TemplateTable::dup_x1() {
1304 transition(vtos, vtos);
1305
1306 // stack: ..., a, b
1307 __ load_ptr(0, Z_tos); // load b
1308 __ load_ptr(1, Z_R0_scratch); // load a
1309 __ store_ptr(1, Z_tos); // store b
1310 __ store_ptr(0, Z_R0_scratch); // store a
1311 __ push_ptr(Z_tos); // push b
1312 // stack: ..., b, a, b
1313 }
1314
1315 void TemplateTable::dup_x2() {
1316 transition(vtos, vtos);
1317
1318 // stack: ..., a, b, c
1319 __ load_ptr(0, Z_R0_scratch); // load c
1320 __ load_ptr(2, Z_R1_scratch); // load a
1321 __ store_ptr(2, Z_R0_scratch); // store c in a
1322 __ push_ptr(Z_R0_scratch); // push c
1323 // stack: ..., c, b, c, c
1324 __ load_ptr(2, Z_R0_scratch); // load b
1325 __ store_ptr(2, Z_R1_scratch); // store a in b
1326 // stack: ..., c, a, c, c
1327 __ store_ptr(1, Z_R0_scratch); // store b in c
1328 // stack: ..., c, a, b, c
1329 }
1330
1331 void TemplateTable::dup2() {
1332 transition(vtos, vtos);
1333
1334 // stack: ..., a, b
1335 __ load_ptr(1, Z_R0_scratch); // load a
1336 __ push_ptr(Z_R0_scratch); // push a
1337 __ load_ptr(1, Z_R0_scratch); // load b
1338 __ push_ptr(Z_R0_scratch); // push b
1339 // stack: ..., a, b, a, b
1340 }
1341
1342 void TemplateTable::dup2_x1() {
1343 transition(vtos, vtos);
1344
1345 // stack: ..., a, b, c
1346 __ load_ptr(0, Z_R0_scratch); // load c
1347 __ load_ptr(1, Z_R1_scratch); // load b
1348 __ push_ptr(Z_R1_scratch); // push b
1349 __ push_ptr(Z_R0_scratch); // push c
1350 // stack: ..., a, b, c, b, c
1351 __ store_ptr(3, Z_R0_scratch); // store c in b
1352 // stack: ..., a, c, c, b, c
1353 __ load_ptr( 4, Z_R0_scratch); // load a
1354 __ store_ptr(2, Z_R0_scratch); // store a in 2nd c
1355 // stack: ..., a, c, a, b, c
1356 __ store_ptr(4, Z_R1_scratch); // store b in a
1357 // stack: ..., b, c, a, b, c
1358 }
1359
1360 void TemplateTable::dup2_x2() {
1361 transition(vtos, vtos);
1362
1363 // stack: ..., a, b, c, d
1364 __ load_ptr(0, Z_R0_scratch); // load d
1365 __ load_ptr(1, Z_R1_scratch); // load c
1366 __ push_ptr(Z_R1_scratch); // push c
1367 __ push_ptr(Z_R0_scratch); // push d
1368 // stack: ..., a, b, c, d, c, d
1369 __ load_ptr(4, Z_R1_scratch); // load b
1370 __ store_ptr(2, Z_R1_scratch); // store b in d
1371 __ store_ptr(4, Z_R0_scratch); // store d in b
1372 // stack: ..., a, d, c, b, c, d
1373 __ load_ptr(5, Z_R0_scratch); // load a
1374 __ load_ptr(3, Z_R1_scratch); // load c
1375 __ store_ptr(3, Z_R0_scratch); // store a in c
1376 __ store_ptr(5, Z_R1_scratch); // store c in a
1377 // stack: ..., c, d, a, b, c, d
1378 }
1379
1380 void TemplateTable::swap() {
1381 transition(vtos, vtos);
1382
1383 // stack: ..., a, b
1384 __ load_ptr(1, Z_R0_scratch); // load a
1385 __ load_ptr(0, Z_R1_scratch); // load b
1386 __ store_ptr(0, Z_R0_scratch); // store a in b
1387 __ store_ptr(1, Z_R1_scratch); // store b in a
1388 // stack: ..., b, a
1389 }
1390
1391 void TemplateTable::iop2(Operation op) {
1392 transition(itos, itos);
1393 switch (op) {
1394 case add : __ z_ay(Z_tos, __ stackTop()); __ pop_i(); break;
1395 case sub : __ z_sy(Z_tos, __ stackTop()); __ pop_i(); __ z_lcr(Z_tos, Z_tos); break;
1396 case mul : __ z_msy(Z_tos, __ stackTop()); __ pop_i(); break;
1397 case _and : __ z_ny(Z_tos, __ stackTop()); __ pop_i(); break;
1398 case _or : __ z_oy(Z_tos, __ stackTop()); __ pop_i(); break;
1399 case _xor : __ z_xy(Z_tos, __ stackTop()); __ pop_i(); break;
1400 case shl : __ z_lr(Z_tmp_1, Z_tos);
1401 __ z_nill(Z_tmp_1, 31); // Lowest 5 bits are shiftamount.
1402 __ pop_i(Z_tos); __ z_sll(Z_tos, 0, Z_tmp_1); break;
1403 case shr : __ z_lr(Z_tmp_1, Z_tos);
1404 __ z_nill(Z_tmp_1, 31); // Lowest 5 bits are shiftamount.
1405 __ pop_i(Z_tos); __ z_sra(Z_tos, 0, Z_tmp_1); break;
1406 case ushr : __ z_lr(Z_tmp_1, Z_tos);
1407 __ z_nill(Z_tmp_1, 31); // Lowest 5 bits are shiftamount.
1408 __ pop_i(Z_tos); __ z_srl(Z_tos, 0, Z_tmp_1); break;
1409 default : ShouldNotReachHere(); break;
1410 }
1411 return;
1412 }
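// Notes on the cases above: the 'sub' case computes value2 - value1 via the
// memory operand and then negates (z_lcr) to obtain value1 - value2. The shift
// cases mask the shift amount with 31 (z_nill), matching the Java rule that
// ishl/ishr/iushr use only the low 5 bits of the shift count.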
1413
1414 void TemplateTable::lop2(Operation op) {
1415 transition(ltos, ltos);
1416
1417 switch (op) {
1418 case add : __ z_ag(Z_tos, __ stackTop()); __ pop_l(); break;
1419 case sub : __ z_sg(Z_tos, __ stackTop()); __ pop_l(); __ z_lcgr(Z_tos, Z_tos); break;
1420 case mul : __ z_msg(Z_tos, __ stackTop()); __ pop_l(); break;
1421 case _and : __ z_ng(Z_tos, __ stackTop()); __ pop_l(); break;
1422 case _or : __ z_og(Z_tos, __ stackTop()); __ pop_l(); break;
1423 case _xor : __ z_xg(Z_tos, __ stackTop()); __ pop_l(); break;
1424 default : ShouldNotReachHere(); break;
1425 }
1426 return;
1427 }
1428
1429 // Common part of idiv/irem.
1430 static void idiv_helper(InterpreterMacroAssembler * _masm, address exception) {
1431 NearLabel not_null;
1432
1433 // Use register pair Z_tmp_1, Z_tmp_2 for DIVIDE SINGLE.
1434 assert(Z_tmp_1->successor() == Z_tmp_2, " need even/odd register pair for idiv/irem");
1435
1436 // Get dividend.
1437 __ pop_i(Z_tmp_2);
1438
1439 // If divisor == 0 throw exception.
1440 __ compare32_and_branch(Z_tos, (intptr_t) 0,
1441 Assembler::bcondNotEqual, not_null );
1442 __ load_absolute_address(Z_R1_scratch, exception);
1443 __ z_br(Z_R1_scratch);
1444
1445 __ bind(not_null);
1446
1447 __ z_lgfr(Z_tmp_2, Z_tmp_2); // Sign extend dividend.
1448 __ z_dsgfr(Z_tmp_1, Z_tos); // Do it.
1449 }
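// DSGFR writes into the even/odd pair: the remainder ends up in Z_tmp_1 (even)
// and the quotient in Z_tmp_2 (odd). That is why idiv() below reads Z_tmp_2
// and irem() reads Z_tmp_1.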
1450
1451 void TemplateTable::idiv() {
1452 transition(itos, itos);
1453
1454 idiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry);
1455 __ z_llgfr(Z_tos, Z_tmp_2); // Result is in Z_tmp_2.
1456 }
1457
1458 void TemplateTable::irem() {
1459 transition(itos, itos);
1460
1461 idiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry);
1462 __ z_llgfr(Z_tos, Z_tmp_1); // Result is in Z_tmp_1.
1463 }
1464
1465 void TemplateTable::lmul() {
1466 transition(ltos, ltos);
1467
1468 // Multiply with memory operand.
1469 __ z_msg(Z_tos, __ stackTop());
1470 __ pop_l(); // Pop operand.
1471 }
1472
1473 // Common part of ldiv/lrem.
1474 //
1475 // Input:
1476 // Z_tos := the divisor (dividend still on stack)
1477 //
1478 // Updated registers:
1479 // Z_tmp_1 := pop_l() % Z_tos ; if is_ldiv == false
1480 // Z_tmp_2 := pop_l() / Z_tos ; if is_ldiv == true
1481 //
1482 static void ldiv_helper(InterpreterMacroAssembler * _masm, address exception, bool is_ldiv) {
1483 NearLabel not_null, done;
1484
1485 // Use register pair Z_tmp_1, Z_tmp_2 for DIVIDE SINGLE.
1486 assert(Z_tmp_1->successor() == Z_tmp_2,
1487 " need even/odd register pair for idiv/irem");
1488
1489 // Get dividend.
1490 __ pop_l(Z_tmp_2);
1491
1492 // If divisor == 0 throw exception.
1493 __ compare64_and_branch(Z_tos, (intptr_t)0, Assembler::bcondNotEqual, not_null);
1494 __ load_absolute_address(Z_R1_scratch, exception);
1495 __ z_br(Z_R1_scratch);
1496
1497 __ bind(not_null);
1498 // Special case for dividend == min_jlong and divisor == -1.
1499 if (is_ldiv) {
1500 // result := Z_tmp_2 := - dividend
1501 __ z_lcgr(Z_tmp_2, Z_tmp_2);
1502 } else {
1503 // result remainder := Z_tmp_1 := 0
1504 __ clear_reg(Z_tmp_1, true, false); // Don't set CC.
1505 }
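// Rationale for the pre-computation above: min_jlong / -1 cannot be computed
// by DSGR (the quotient is not representable), so the divisor == -1 result is
// produced without dividing: -dividend for ldiv (note that negating min_jlong
// wraps back to min_jlong, the value required by the JLS), and 0 for lrem.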
1506
1507 // if divisor == -1 goto done
1508 __ compare64_and_branch(Z_tos, -1, Assembler::bcondEqual, done);
1509 if (is_ldiv)
1510 // Restore sign, because divisor != -1.
1511 __ z_lcgr(Z_tmp_2, Z_tmp_2);
1512 __ z_dsgr(Z_tmp_1, Z_tos); // Do it.
1513 __ bind(done);
1514 }
1515
1516 void TemplateTable::ldiv() {
1517 transition(ltos, ltos);
1518
1519 ldiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry, true /*is_ldiv*/);
1520 __ z_lgr(Z_tos, Z_tmp_2); // Result is in Z_tmp_2.
1521 }
1522
1523 void TemplateTable::lrem() {
1524 transition(ltos, ltos);
1525
1526 ldiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry, false /*is_ldiv*/);
1527 __ z_lgr(Z_tos, Z_tmp_1); // Result is in Z_tmp_1.
1528 }
1529
1530 void TemplateTable::lshl() {
1531 transition(itos, ltos);
1532
1533 // Z_tos: shift amount
1534 __ pop_l(Z_tmp_1); // Get the value to be shifted.
1535 __ z_sllg(Z_tos, Z_tmp_1, 0, Z_tos);
1536 }
1537
1538 void TemplateTable::lshr() {
1539 transition(itos, ltos);
1540
1541 // Z_tos: shift amount
1542 __ pop_l(Z_tmp_1); // Get the value to be shifted.
1543 __ z_srag(Z_tos, Z_tmp_1, 0, Z_tos);
1544 }
1545
1546 void TemplateTable::lushr() {
1547 transition(itos, ltos);
1548
1549 // Z_tos: shift amount
1550 __ pop_l(Z_tmp_1); // Get the value to be shifted.
1551 __ z_srlg(Z_tos, Z_tmp_1, 0, Z_tos);
1552 }
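// No explicit masking is needed in the three long shifts above: the 64-bit
// shift instructions (SLLG/SRAG/SRLG) use only the low 6 bits of the shift
// amount, which matches the Java rule that lshl/lshr/lushr take the shift
// count modulo 64.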
1553
1554 void TemplateTable::fop2(Operation op) {
1555 transition(ftos, ftos);
1556
1557 switch (op) {
1558 case add:
1559 // Add memory operand.
1560 __ z_aeb(Z_ftos, __ stackTop()); __ pop_f(); return;
1561 case sub:
1562 // Sub memory operand.
1563 __ z_ler(Z_F1, Z_ftos); // first operand
1564 __ pop_f(Z_ftos); // second operand from stack
1565 __ z_sebr(Z_ftos, Z_F1);
1566 return;
1567 case mul:
1568 // Multiply with memory operand.
1569 __ z_meeb(Z_ftos, __ stackTop()); __ pop_f(); return;
1570 case div:
1571 __ z_ler(Z_F1, Z_ftos); // first operand
1572 __ pop_f(Z_ftos); // second operand from stack
1573 __ z_debr(Z_ftos, Z_F1);
1574 return;
1575 case rem:
1576 // Do runtime call.
1577 __ z_ler(Z_FARG2, Z_ftos); // divisor
1578 __ pop_f(Z_FARG1); // dividend
1579 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1580 // Result should be in the right place (Z_ftos == Z_FRET).
1581 return;
1582 default:
1583 ShouldNotReachHere();
1584 return;
1585 }
1586 }
1587
1588 void TemplateTable::dop2(Operation op) {
1589 transition(dtos, dtos);
1590
1591 switch (op) {
1592 case add:
1593 // Add memory operand.
1594 __ z_adb(Z_ftos, __ stackTop()); __ pop_d(); return;
1595 case sub:
1596 // Sub memory operand.
1597 __ z_ldr(Z_F1, Z_ftos); // first operand
1598 __ pop_d(Z_ftos); // second operand from stack
1599 __ z_sdbr(Z_ftos, Z_F1);
1600 return;
1601 case mul:
1602 // Multiply with memory operand.
1603 __ z_mdb(Z_ftos, __ stackTop()); __ pop_d(); return;
1604 case div:
1605 __ z_ldr(Z_F1, Z_ftos); // first operand
1606 __ pop_d(Z_ftos); // second operand from stack
1607 __ z_ddbr(Z_ftos, Z_F1);
1608 return;
1609 case rem:
1610 // Do runtime call.
1611 __ z_ldr(Z_FARG2, Z_ftos); // divisor
1612 __ pop_d(Z_FARG1); // dividend
1613 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1614 // Result should be in the right place (Z_ftos == Z_FRET).
1615 return;
1616 default:
1617 ShouldNotReachHere();
1618 return;
1619 }
1620 }
1621
1622 void TemplateTable::ineg() {
1623 transition(itos, itos);
1624 __ z_lcr(Z_tos);
1625 }
1626
1627 void TemplateTable::lneg() {
1628 transition(ltos, ltos);
1629 __ z_lcgr(Z_tos);
1630 }
1631
1632 void TemplateTable::fneg() {
1633 transition(ftos, ftos);
1634 __ z_lcebr(Z_ftos, Z_ftos);
1635 }
1636
1637 void TemplateTable::dneg() {
1638 transition(dtos, dtos);
1639 __ z_lcdbr(Z_ftos, Z_ftos);
1640 }
1641
1642 void TemplateTable::iinc() {
1643 transition(vtos, vtos);
1644
1645 Address local;
1646 __ z_lb(Z_R0_scratch, at_bcp(2)); // Get constant.
1647 locals_index(Z_R1_scratch);
1648 local = iaddress(_masm, Z_R1_scratch);
1649 __ z_a(Z_R0_scratch, local);
1650 __ reg2mem_opt(Z_R0_scratch, local, false);
1651 }
1652
1653 void TemplateTable::wide_iinc() {
1654 transition(vtos, vtos);
1655
1656 // Z_tmp_1 := increment
1657 __ get_2_byte_integer_at_bcp(Z_tmp_1, 4, InterpreterMacroAssembler::Signed);
1658 // Z_tmp_2 := index of local to increment
1659 locals_index_wide(Z_tmp_2);
1660 // Load, increment, and store.
1661 __ access_local_int(Z_tmp_2, Z_tos);
1662 __ z_agr(Z_tos, Z_tmp_1);
1663 // Shifted index is still in Z_tmp_2.
1664 __ reg2mem_opt(Z_tos, Address(Z_locals, Z_tmp_2), false);
1665 }
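// Bytecode layout used above: plain iinc is <opcode, local index, signed
// 8-bit constant>, hence the reads at at_bcp(1) and at_bcp(2); the wide form
// is <wide, iinc, 2-byte index, 2-byte signed constant>, hence the 2-byte
// reads at bcp offsets 2 and 4.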
1666
1667
1668 void TemplateTable::convert() {
1669 // Checking
1670 #ifdef ASSERT
1671 TosState tos_in = ilgl;
1672 TosState tos_out = ilgl;
1673
1674 switch (bytecode()) {
1675 case Bytecodes::_i2l:
1676 case Bytecodes::_i2f:
1677 case Bytecodes::_i2d:
1678 case Bytecodes::_i2b:
1679 case Bytecodes::_i2c:
1680 case Bytecodes::_i2s:
1681 tos_in = itos;
1682 break;
1683 case Bytecodes::_l2i:
1684 case Bytecodes::_l2f:
1685 case Bytecodes::_l2d:
1686 tos_in = ltos;
1687 break;
1688 case Bytecodes::_f2i:
1689 case Bytecodes::_f2l:
1690 case Bytecodes::_f2d:
1691 tos_in = ftos;
1692 break;
1693 case Bytecodes::_d2i:
1694 case Bytecodes::_d2l:
1695 case Bytecodes::_d2f:
1696 tos_in = dtos;
1697 break;
1698 default :
1699 ShouldNotReachHere();
1700 }
1701 switch (bytecode()) {
1702 case Bytecodes::_l2i:
1703 case Bytecodes::_f2i:
1704 case Bytecodes::_d2i:
1705 case Bytecodes::_i2b:
1706 case Bytecodes::_i2c:
1707 case Bytecodes::_i2s:
1708 tos_out = itos;
1709 break;
1710 case Bytecodes::_i2l:
1711 case Bytecodes::_f2l:
1712 case Bytecodes::_d2l:
1713 tos_out = ltos;
1714 break;
1715 case Bytecodes::_i2f:
1716 case Bytecodes::_l2f:
1717 case Bytecodes::_d2f:
1718 tos_out = ftos;
1719 break;
1720 case Bytecodes::_i2d:
1721 case Bytecodes::_l2d:
1722 case Bytecodes::_f2d:
1723 tos_out = dtos;
1724 break;
1725 default :
1726 ShouldNotReachHere();
1727 }
1728
1729 transition(tos_in, tos_out);
1730 #endif // ASSERT
1731
1732 // Conversion
1733 Label done;
1734 switch (bytecode()) {
1735 case Bytecodes::_i2l:
1736 __ z_lgfr(Z_tos, Z_tos);
1737 return;
1738 case Bytecodes::_i2f:
1739 __ z_cefbr(Z_ftos, Z_tos);
1740 return;
1741 case Bytecodes::_i2d:
1742 __ z_cdfbr(Z_ftos, Z_tos);
1743 return;
1744 case Bytecodes::_i2b:
1745 // Sign extend least significant byte.
1746 __ move_reg_if_needed(Z_tos, T_BYTE, Z_tos, T_INT);
1747 return;
1748 case Bytecodes::_i2c:
1749 // Zero extend 2 least significant bytes.
1750 __ move_reg_if_needed(Z_tos, T_CHAR, Z_tos, T_INT);
1751 return;
1752 case Bytecodes::_i2s:
1753 // Sign extend 2 least significant bytes.
1754 __ move_reg_if_needed(Z_tos, T_SHORT, Z_tos, T_INT);
1755 return;
1756 case Bytecodes::_l2i:
1757 // Sign-extend not needed here, upper 4 bytes of int value in register are ignored.
1758 return;
1759 case Bytecodes::_l2f:
1760 __ z_cegbr(Z_ftos, Z_tos);
1761 return;
1762 case Bytecodes::_l2d:
1763 __ z_cdgbr(Z_ftos, Z_tos);
1764 return;
1765 case Bytecodes::_f2i:
1766 case Bytecodes::_f2l:
1767 __ clear_reg(Z_tos, true, false); // Don't set CC.
1768 __ z_cebr(Z_ftos, Z_ftos);
1769 __ z_brno(done); // NaN -> 0
1770 if (bytecode() == Bytecodes::_f2i)
1771 __ z_cfebr(Z_tos, Z_ftos, Assembler::to_zero);
1772 else // bytecode() == Bytecodes::_f2l
1773 __ z_cgebr(Z_tos, Z_ftos, Assembler::to_zero);
1774 break;
1775 case Bytecodes::_f2d:
1776 __ move_freg_if_needed(Z_ftos, T_DOUBLE, Z_ftos, T_FLOAT);
1777 return;
1778 case Bytecodes::_d2i:
1779 case Bytecodes::_d2l:
1780 __ clear_reg(Z_tos, true, false); // Don't set CC.
1781 __ z_cdbr(Z_ftos, Z_ftos);
1782 __ z_brno(done); // NaN -> 0
1783 if (bytecode() == Bytecodes::_d2i)
1784 __ z_cfdbr(Z_tos, Z_ftos, Assembler::to_zero);
1785 else // Bytecodes::_d2l
1786 __ z_cgdbr(Z_tos, Z_ftos, Assembler::to_zero);
1787 break;
1788 case Bytecodes::_d2f:
1789 __ move_freg_if_needed(Z_ftos, T_FLOAT, Z_ftos, T_DOUBLE);
1790 return;
1791 default:
1792 ShouldNotReachHere();
1793 }
1794 __ bind(done);
1795 }
1796
1797 void TemplateTable::lcmp() {
1798 transition(ltos, itos);
1799
1800 Label done;
1801 Register val1 = Z_R0_scratch;
1802 Register val2 = Z_R1_scratch;
1803
1804 if (VM_Version::has_LoadStoreConditional()) {
1805 __ pop_l(val1); // pop value 1.
1806 __ z_lghi(val2, -1); // lt value
1807 __ z_cgr(val1, Z_tos); // Compare with Z_tos (value 2). Protect CC under all circumstances.
1808 __ z_lghi(val1, 1); // gt value
1809 __ z_lghi(Z_tos, 0); // eq value
1810
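    // Load-on-condition picks the +1/-1 result without branches, based on the CC
    // left by the compare above; the preset 0 survives if the values were equal.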
1811 __ z_locgr(Z_tos, val1, Assembler::bcondHigh);
1812 __ z_locgr(Z_tos, val2, Assembler::bcondLow);
1813 } else {
1814 __ pop_l(val1); // Pop value 1.
1815 __ z_cgr(val1, Z_tos); // Compare with Z_tos (value 2). Protect CC under all circumstances.
1816
1817 __ z_lghi(Z_tos, 0); // eq value
1818 __ z_bre(done);
1819
1820 __ z_lghi(Z_tos, 1); // gt value
1821 __ z_brh(done);
1822
1823 __ z_lghi(Z_tos, -1); // lt value
1824 }
1825
1826 __ bind(done);
1827 }
1828
1829
1830 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
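  // unordered_result == 1 implements fcmpg/dcmpg (NaN compares as "greater"),
  // unordered_result == -1 implements fcmpl/dcmpl (NaN compares as "less").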
1831 Label done;
1832
1833 if (is_float) {
1834 __ pop_f(Z_FARG2);
1835 __ z_cebr(Z_FARG2, Z_ftos);
1836 } else {
1837 __ pop_d(Z_FARG2);
1838 __ z_cdbr(Z_FARG2, Z_ftos);
1839 }
1840
1841 if (VM_Version::has_LoadStoreConditional()) {
1842 Register one = Z_R0_scratch;
1843 Register minus_one = Z_R1_scratch;
1844 __ z_lghi(minus_one, -1);
1845 __ z_lghi(one, 1);
1846 __ z_lghi(Z_tos, 0);
1847 __ z_locgr(Z_tos, one, unordered_result == 1 ? Assembler::bcondHighOrNotOrdered : Assembler::bcondHigh);
1848 __ z_locgr(Z_tos, minus_one, unordered_result == 1 ? Assembler::bcondLow : Assembler::bcondLowOrNotOrdered);
1849 } else {
1850 // Z_FARG2 == Z_ftos
1851 __ clear_reg(Z_tos, false, false);
1852 __ z_bre(done);
1853
1854     // Z_FARG2 > Z_ftos, or unordered
1855 __ z_lhi(Z_tos, 1);
1856 __ z_brc(unordered_result == 1 ? Assembler::bcondHighOrNotOrdered : Assembler::bcondHigh, done);
1857
1858     // Z_FARG2 < Z_ftos, or unordered
1859 __ z_lhi(Z_tos, -1);
1860
1861 __ bind(done);
1862 }
1863 }
1864
1865 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1866 const Register bumped_count = Z_tmp_1;
1867 const Register method = Z_tmp_2;
1868 const Register m_counters = Z_R1_scratch;
1869 const Register mdo = Z_tos;
1870
1871 BLOCK_COMMENT("TemplateTable::branch {");
1872 __ get_method(method);
1873 __ profile_taken_branch(mdo, bumped_count);
1874
1875 const ByteSize ctr_offset = InvocationCounter::counter_offset();
1876 const ByteSize be_offset = MethodCounters::backedge_counter_offset() + ctr_offset;
1877 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + ctr_offset;
1878
1879 // Get (wide) offset to disp.
1880 const Register disp = Z_ARG5;
1881 if (is_wide) {
1882 __ get_4_byte_integer_at_bcp(disp, 1);
1883 } else {
1884 __ get_2_byte_integer_at_bcp(disp, 1, InterpreterMacroAssembler::Signed);
1885 }
1886
1887 // Handle all the JSR stuff here, then exit.
1888 // It's much shorter and cleaner than intermingling with the
1889 // non-JSR normal-branch stuff occurring below.
1890 if (is_jsr) {
1891 // Compute return address as bci in Z_tos.
1892 __ z_lgr(Z_R1_scratch, Z_bcp);
1893 __ z_sg(Z_R1_scratch, Address(method, Method::const_offset()));
1894 __ add2reg(Z_tos, (is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset()), Z_R1_scratch);
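    // jsr is 3 bytes long (opcode + 2-byte offset), jsr_w is 5 bytes (opcode + 4-byte
    // offset), so Z_tos now holds the bci of the bytecode following the jsr,
    // which is where a later "ret" will resume.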
1895
1896 // Bump bcp to target of JSR.
1897 __ z_agr(Z_bcp, disp);
1898 // Push return address for "ret" on stack.
1899 __ push_ptr(Z_tos);
1900 // And away we go!
1901     __ dispatch_next(vtos, 0, true);
1902 return;
1903 }
1904
1905 // Normal (non-jsr) branch handling.
1906
1907 // Bump bytecode pointer by displacement (take the branch).
1908 __ z_agr(Z_bcp, disp);
1909
1910 assert(UseLoopCounter || !UseOnStackReplacement,
1911 "on-stack-replacement requires loop counters");
1912
1913 NearLabel backedge_counter_overflow;
1914 NearLabel profile_method;
1915 NearLabel dispatch;
1916 int increment = InvocationCounter::count_increment;
1917
1918 if (UseLoopCounter) {
1919 // Increment backedge counter for backward branches.
1920 // disp: target offset
1921 // Z_bcp: target bcp
1922 // Z_locals: locals pointer
1923 //
1924 // Count only if backward branch.
1925 __ compare32_and_branch(disp, (intptr_t)0, Assembler::bcondHigh, dispatch);
1926
1927 if (TieredCompilation) {
1928 Label noCounters;
1929
1930 if (ProfileInterpreter) {
1931 NearLabel no_mdo;
1932
1933 // Are we profiling?
1934 __ load_and_test_long(mdo, Address(method, Method::method_data_offset()));
1935 __ branch_optimized(Assembler::bcondZero, no_mdo);
1936
1937 // Increment the MDO backedge counter.
1938 const Address mdo_backedge_counter(mdo, MethodData::backedge_counter_offset() + InvocationCounter::counter_offset());
1939
1940 const Address mask(mdo, MethodData::backedge_mask_offset());
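      // increment_mask_and_jump bumps the counter, ANDs it with the profiling mask,
      // and branches to the overflow handler when the masked value becomes zero.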
1941 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1942 Z_ARG2, false, Assembler::bcondZero,
1943 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
1944 __ z_bru(dispatch);
1945 __ bind(no_mdo);
1946 }
1947
1948 // Increment backedge counter in MethodCounters*.
1949 __ get_method_counters(method, m_counters, noCounters);
1950 const Address mask(m_counters, MethodCounters::backedge_mask_offset());
1951 __ increment_mask_and_jump(Address(m_counters, be_offset),
1952 increment, mask,
1953 Z_ARG2, false, Assembler::bcondZero,
1954 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
1955 __ bind(noCounters);
1956 } else {
1957 Register counter = Z_tos;
1958 Label noCounters;
1959 // Get address of MethodCounters object.
1960 __ get_method_counters(method, m_counters, noCounters);
1961 // Increment backedge counter.
1962 __ increment_backedge_counter(m_counters, counter);
1963
1964 if (ProfileInterpreter) {
1965 // Test to see if we should create a method data obj.
1966 __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_profile_limit_offset()));
1967 __ z_brl(dispatch);
1968
1969 // If no method data exists, go to profile method.
1970 __ test_method_data_pointer(Z_ARG4/*result unused*/, profile_method);
1971
1972 if (UseOnStackReplacement) {
1973 // Check for overflow against 'bumped_count' which is the MDO taken count.
1974 __ z_cl(bumped_count, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
1975 __ z_brl(dispatch);
1976
1977 // When ProfileInterpreter is on, the backedge_count comes
1978 // from the methodDataOop, which value does not get reset on
1979 // the call to frequency_counter_overflow(). To avoid
1980 // excessive calls to the overflow routine while the method is
1981 // being compiled, add a second test to make sure the overflow
1982 // function is called only once every overflow_frequency.
1983 const int overflow_frequency = 1024;
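          // Power-of-two mask: the masked value is zero only once every 1024 taken
          // backward branches, throttling calls to frequency_counter_overflow().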
1984 __ and_imm(bumped_count, overflow_frequency - 1);
1985 __ z_brz(backedge_counter_overflow);
1986
1987 }
1988 } else {
1989 if (UseOnStackReplacement) {
1990 // Check for overflow against 'counter', which is the sum of the
1991 // counters.
1992 __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
1993 __ z_brh(backedge_counter_overflow);
1994 }
1995 }
1996 __ bind(noCounters);
1997 }
1998
1999 __ bind(dispatch);
2000 }
2001
2002   // Pre-load the next target bytecode into Z_bytecode.
2003 __ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t) 0));
2004
2005 // Continue with the bytecode @ target.
2006 // Z_tos: Return bci for jsr's, unused otherwise.
2007 // Z_bytecode: target bytecode
2008 // Z_bcp: target bcp
2009 __ dispatch_only(vtos, true);
2010
2011 // Out-of-line code runtime calls.
2012 if (UseLoopCounter) {
2013 if (ProfileInterpreter && !TieredCompilation) {
2014 // Out-of-line code to allocate method data oop.
2015 __ bind(profile_method);
2016
2017 __ call_VM(noreg,
2018 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2019 __ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t) 0)); // Restore target bytecode.
2020 __ set_method_data_pointer_for_bcp();
2021 __ z_bru(dispatch);
2022 }
2023
2024 if (UseOnStackReplacement) {
2025
2026 // invocation counter overflow
2027 __ bind(backedge_counter_overflow);
2028
2029 __ z_lcgr(Z_ARG2, disp); // Z_ARG2 := -disp
2030 __ z_agr(Z_ARG2, Z_bcp); // Z_ARG2 := branch target bcp - disp == branch bcp
2031 __ call_VM(noreg,
2032 CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
2033 Z_ARG2);
2034
2035 // Z_RET: osr nmethod (osr ok) or NULL (osr not possible).
2036 __ compare64_and_branch(Z_RET, (intptr_t) 0, Assembler::bcondEqual, dispatch);
2037
2038 // Nmethod may have been invalidated (VM may block upon call_VM return).
2039 __ z_cliy(nmethod::state_offset(), Z_RET, nmethod::in_use);
2040 __ z_brne(dispatch);
2041
2042 // Migrate the interpreter frame off of the stack.
2043
2044 __ z_lgr(Z_tmp_1, Z_RET); // Save the nmethod.
2045
2046 call_VM(noreg,
2047 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2048
2049 // Z_RET is OSR buffer, move it to expected parameter location.
2050 __ lgr_if_needed(Z_ARG1, Z_RET);
2051
2052 // Pop the interpreter frame ...
2053 __ pop_interpreter_frame(Z_R14, Z_ARG2/*tmp1*/, Z_ARG3/*tmp2*/);
2054
2055 // ... and begin the OSR nmethod.
2056 __ z_lg(Z_R1_scratch, Address(Z_tmp_1, nmethod::osr_entry_point_offset()));
2057 __ z_br(Z_R1_scratch);
2058 }
2059 }
2060 BLOCK_COMMENT("} TemplateTable::branch");
2061 }
2062
2063 void TemplateTable::if_0cmp(Condition cc) {
2064 transition(itos, vtos);
2065
2066 // Assume branch is more often taken than not (loops use backward branches).
2067 NearLabel not_taken;
2068 __ compare32_and_branch(Z_tos, (intptr_t) 0, j_not(cc), not_taken);
2069 branch(false, false);
2070 __ bind(not_taken);
2071 __ profile_not_taken_branch(Z_tos);
2072 }
2073
2074 void TemplateTable::if_icmp(Condition cc) {
2075 transition(itos, vtos);
2076
2077 // Assume branch is more often taken than not (loops use backward branches).
2078 NearLabel not_taken;
2079 __ pop_i(Z_R0_scratch);
2080 __ compare32_and_branch(Z_R0_scratch, Z_tos, j_not(cc), not_taken);
2081 branch(false, false);
2082 __ bind(not_taken);
2083 __ profile_not_taken_branch(Z_tos);
2084 }
2085
2086 void TemplateTable::if_nullcmp(Condition cc) {
2087 transition(atos, vtos);
2088
2089   // Assume branch is more often taken than not (loops use backward branches).
2090 NearLabel not_taken;
2091 __ compare64_and_branch(Z_tos, (intptr_t) 0, j_not(cc), not_taken);
2092 branch(false, false);
2093 __ bind(not_taken);
2094 __ profile_not_taken_branch(Z_tos);
2095 }
2096
2097 void TemplateTable::if_acmp(Condition cc) {
2098 transition(atos, vtos);
2099 // Assume branch is more often taken than not (loops use backward branches).
2100 NearLabel not_taken;
2101 __ pop_ptr(Z_ARG2);
2102 __ verify_oop(Z_ARG2);
2103 __ verify_oop(Z_tos);
2104 __ compareU64_and_branch(Z_tos, Z_ARG2, j_not(cc), not_taken);
2105 branch(false, false);
2106 __ bind(not_taken);
2107 __ profile_not_taken_branch(Z_ARG3);
2108 }
2109
2110 void TemplateTable::ret() {
2111 transition(vtos, vtos);
2112
2113 locals_index(Z_tmp_1);
2114 // Get return bci, compute return bcp. Must load 64 bits.
2115 __ mem2reg_opt(Z_tmp_1, iaddress(_masm, Z_tmp_1));
2116 __ profile_ret(Z_tmp_1, Z_tmp_2);
2117 __ get_method(Z_tos);
2118 __ mem2reg_opt(Z_R1_scratch, Address(Z_tos, Method::const_offset()));
2119 __ load_address(Z_bcp, Address(Z_R1_scratch, Z_tmp_1, ConstMethod::codes_offset()));
2120   __ dispatch_next(vtos, 0, true);
2121 }
2122
2123 void TemplateTable::wide_ret() {
2124 transition(vtos, vtos);
2125
2126 locals_index_wide(Z_tmp_1);
2127 // Get return bci, compute return bcp.
2128 __ mem2reg_opt(Z_tmp_1, aaddress(_masm, Z_tmp_1));
2129 __ profile_ret(Z_tmp_1, Z_tmp_2);
2130 __ get_method(Z_tos);
2131 __ mem2reg_opt(Z_R1_scratch, Address(Z_tos, Method::const_offset()));
2132 __ load_address(Z_bcp, Address(Z_R1_scratch, Z_tmp_1, ConstMethod::codes_offset()));
2133 __ dispatch_next(vtos, 0, true);
2134 }
2135
2136 void TemplateTable::tableswitch () {
2137 transition(itos, vtos);
2138
2139 NearLabel default_case, continue_execution;
2140 Register bcp = Z_ARG5;
2141 // Align bcp.
2142 __ load_address(bcp, at_bcp(BytesPerInt));
2143 __ z_nill(bcp, (-BytesPerInt) & 0xffff);
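  // Per JVMS, the 32-bit operands of tableswitch start at the next 4-byte boundary
  // after the opcode (0-3 padding bytes); (bcp + BytesPerInt) & -BytesPerInt computes
  // that aligned address.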
2144
2145 // Load lo & hi.
2146 Register low = Z_tmp_1;
2147 Register high = Z_tmp_2;
2148
2149 // Load low into 64 bits, since used for address calculation.
2150 __ mem2reg_signed_opt(low, Address(bcp, BytesPerInt));
2151 __ mem2reg_opt(high, Address(bcp, 2 * BytesPerInt), false);
2152 // Sign extend "label" value for address calculation.
2153 __ z_lgfr(Z_tos, Z_tos);
2154
2155 // Check against lo & hi.
2156 __ compare32_and_branch(Z_tos, low, Assembler::bcondLow, default_case);
2157 __ compare32_and_branch(Z_tos, high, Assembler::bcondHigh, default_case);
2158
2159 // Lookup dispatch offset.
2160 __ z_sgr(Z_tos, low);
2161 Register jump_table_offset = Z_ARG3;
2162 // Index2offset; index in Z_tos is killed by profile_switch_case.
2163 __ z_sllg(jump_table_offset, Z_tos, LogBytesPerInt);
2164 __ profile_switch_case(Z_tos, Z_ARG4 /*tmp for mdp*/, low/*tmp*/, Z_bytecode/*tmp*/);
2165
2166 Register index = Z_tmp_2;
2167
2168 // Load index sign extended for addressing.
2169 __ mem2reg_signed_opt(index, Address(bcp, jump_table_offset, 3 * BytesPerInt));
2170
2171 // Continue execution.
2172 __ bind(continue_execution);
2173
2174 // Load next bytecode.
2175 __ z_llgc(Z_bytecode, Address(Z_bcp, index));
2176 __ z_agr(Z_bcp, index); // Advance bcp.
2177 __ dispatch_only(vtos, true);
2178
2179 // Handle default.
2180 __ bind(default_case);
2181
2182 __ profile_switch_default(Z_tos);
2183 __ mem2reg_signed_opt(index, Address(bcp));
2184 __ z_bru(continue_execution);
2185 }
2186
2187 void TemplateTable::lookupswitch () {
2188 transition(itos, itos);
2189 __ stop("lookupswitch bytecode should have been rewritten");
2190 }
2191
2192 void TemplateTable::fast_linearswitch () {
2193 transition(itos, vtos);
2194
2195 Label loop_entry, loop, found, continue_execution;
2196 Register bcp = Z_ARG5;
2197
2198 // Align bcp.
2199 __ load_address(bcp, at_bcp(BytesPerInt));
2200 __ z_nill(bcp, (-BytesPerInt) & 0xffff);
2201
2202 // Start search with last case.
2203 Register current_case_offset = Z_tmp_1;
2204
2205 __ mem2reg_signed_opt(current_case_offset, Address(bcp, BytesPerInt));
2206 __ z_sllg(current_case_offset, current_case_offset, LogBytesPerWord); // index2bytes
2207 __ z_bru(loop_entry);
2208
2209 // table search
2210 __ bind(loop);
2211
2212 __ z_c(Z_tos, Address(bcp, current_case_offset, 2 * BytesPerInt));
2213 __ z_bre(found);
2214
2215 __ bind(loop_entry);
2216 __ z_aghi(current_case_offset, -2 * BytesPerInt); // Decrement.
2217 __ z_brnl(loop);
2218
2219 // default case
2220 Register offset = Z_tmp_2;
2221
2222 __ profile_switch_default(Z_tos);
2223 // Load offset sign extended for addressing.
2224 __ mem2reg_signed_opt(offset, Address(bcp));
2225 __ z_bru(continue_execution);
2226
2227 // Entry found -> get offset.
2228 __ bind(found);
2229 __ mem2reg_signed_opt(offset, Address(bcp, current_case_offset, 3 * BytesPerInt));
2230 // Profile that this case was taken.
2231 Register current_case_idx = Z_ARG4;
2232 __ z_srlg(current_case_idx, current_case_offset, LogBytesPerWord); // bytes2index
2233 __ profile_switch_case(current_case_idx, Z_tos, bcp, Z_bytecode);
2234
2235 // Continue execution.
2236 __ bind(continue_execution);
2237
2238 // Load next bytecode.
2239 __ z_llgc(Z_bytecode, Address(Z_bcp, offset, 0));
2240 __ z_agr(Z_bcp, offset); // Advance bcp.
2241 __ dispatch_only(vtos, true);
2242 }
2243
2244
2245 void TemplateTable::fast_binaryswitch() {
2246
2247 transition(itos, vtos);
2248
2249 // Implementation using the following core algorithm:
2250 //
2251 // int binary_search(int key, LookupswitchPair* array, int n) {
2252 // // Binary search according to "Methodik des Programmierens" by
2253 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2254 // int i = 0;
2255 // int j = n;
2256 // while (i+1 < j) {
2257 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2258 // // with Q: for all i: 0 <= i < n: key < a[i]
2259 // // where a stands for the array and assuming that the (inexisting)
2260 // // element a[n] is infinitely big.
2261 // int h = (i + j) >> 1;
2262 // // i < h < j
2263 // if (key < array[h].fast_match()) {
2264 // j = h;
2265 // } else {
2266 // i = h;
2267 // }
2268 // }
2269 // // R: a[i] <= key < a[i+1] or Q
2270 // // (i.e., if key is within array, i is the correct index)
2271 // return i;
2272 // }
2273
2274 // Register allocation
2275 // Note: Since we use the indices in address operands, we do all the
2276 // computation in 64 bits.
2277 const Register key = Z_tos; // Already set (tosca).
2278 const Register array = Z_tmp_1;
2279 const Register i = Z_tmp_2;
2280 const Register j = Z_ARG5;
2281 const Register h = Z_ARG4;
2282 const Register temp = Z_R1_scratch;
2283
2284 // Find array start.
2285 __ load_address(array, at_bcp(3 * BytesPerInt));
2286 __ z_nill(array, (-BytesPerInt) & 0xffff); // align
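  // The array consists of (match, offset) pairs of BytesPerInt each, i.e. 8 bytes per
  // pair, which is why indices are scaled by LogBytesPerWord below. The pair count
  // lives at array[-BytesPerInt], the default offset at array[-2*BytesPerInt].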
2287
2288 // Initialize i & j.
2289 __ clear_reg(i, true, false); // i = 0; Don't set CC.
2290 __ mem2reg_signed_opt(j, Address(array, -BytesPerInt)); // j = length(array);
2291
2292 // And start.
2293 Label entry;
2294 __ z_bru(entry);
2295
2296 // binary search loop
2297 {
2298 NearLabel loop;
2299
2300 __ bind(loop);
2301
2302 // int h = (i + j) >> 1;
2303 __ add2reg_with_index(h, 0, i, j); // h = i + j;
2304 __ z_srag(h, h, 1); // h = (i + j) >> 1;
2305
2306 // if (key < array[h].fast_match()) {
2307 // j = h;
2308 // } else {
2309 // i = h;
2310 // }
2311
2312 // Convert array[h].match to native byte-ordering before compare.
2313 __ z_sllg(temp, h, LogBytesPerWord); // index2bytes
2314 __ mem2reg_opt(temp, Address(array, temp), false);
2315
2316 NearLabel else_;
2317
2318 __ compare32_and_branch(key, temp, Assembler::bcondNotLow, else_);
2319 // j = h if (key < array[h].fast_match())
2320 __ z_lgr(j, h);
2321 __ z_bru(entry); // continue
2322
2323 __ bind(else_);
2324
2325 // i = h if (key >= array[h].fast_match())
2326 __ z_lgr(i, h); // and fallthrough
2327
2328 // while (i+1 < j)
2329 __ bind(entry);
2330
2331 // if (i + 1 < j) continue search
2332 __ add2reg(h, 1, i);
2333 __ compare64_and_branch(h, j, Assembler::bcondLow, loop);
2334 }
2335
2336 // End of binary search, result index is i (must check again!).
2337 NearLabel default_case;
2338
2339 // h is no longer needed, so use it to hold the byte offset.
2340 __ z_sllg(h, i, LogBytesPerWord); // index2bytes
2341 __ mem2reg_opt(temp, Address(array, h), false);
2342 __ compare32_and_branch(key, temp, Assembler::bcondNotEqual, default_case);
2343
2344 // entry found -> j = offset
2345 __ mem2reg_signed_opt(j, Address(array, h, BytesPerInt));
2346 __ profile_switch_case(i, key, array, Z_bytecode);
2347 // Load next bytecode.
2348 __ z_llgc(Z_bytecode, Address(Z_bcp, j));
2349 __ z_agr(Z_bcp, j); // Advance bcp.
2350 __ dispatch_only(vtos, true);
2351
2352 // default case -> j = default offset
2353 __ bind(default_case);
2354
2355 __ profile_switch_default(i);
2356 __ mem2reg_signed_opt(j, Address(array, -2 * BytesPerInt));
2357 // Load next bytecode.
2358 __ z_llgc(Z_bytecode, Address(Z_bcp, j));
2359 __ z_agr(Z_bcp, j); // Advance bcp.
2360 __ dispatch_only(vtos, true);
2361 }
2362
2363 void TemplateTable::_return(TosState state) {
2364 transition(state, state);
2365 assert(_desc->calls_vm(),
2366 "inconsistent calls_vm information"); // call in remove_activation
2367
2368 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2369 Register Rthis = Z_ARG2;
2370 Register Rklass = Z_ARG5;
2371 Label skip_register_finalizer;
2372 assert(state == vtos, "only valid state");
2373 __ z_lg(Rthis, aaddress(0));
2374 __ load_klass(Rklass, Rthis);
2375 __ testbit(Address(Rklass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
2376 __ z_bfalse(skip_register_finalizer);
2377 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Rthis);
2378 __ bind(skip_register_finalizer);
2379 }
2380
2381 if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
2382 Label no_safepoint;
2383 const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
2384 __ z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
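    // TM tests the "armed" bit in the thread-local poll byte; if it is clear, no
    // safepoint/handshake is pending and we skip the runtime call.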
2385 __ z_braz(no_safepoint);
2386 __ push(state);
2387 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
2388 __ pop(state);
2389 __ bind(no_safepoint);
2390 }
2391
2392 if (state == itos) {
2393 // Narrow result if state is itos but result type is smaller.
2394 // Need to narrow in the return bytecode rather than in generate_return_entry
2395 // since compiled code callers expect the result to already be narrowed.
2396 __ narrow(Z_tos, Z_tmp_1); /* fall through */
2397 }
2398
2399 __ remove_activation(state, Z_R14);
2400 __ z_br(Z_R14);
2401 }
2402
2403 // ----------------------------------------------------------------------------
2404 // NOTE: Cpe_offset is already computed as byte offset, so we must not
2405 // shift it afterwards!
2406 void TemplateTable::resolve_cache_and_index(int byte_no,
2407 Register Rcache,
2408 Register cpe_offset,
2409 size_t index_size) {
2410 BLOCK_COMMENT("resolve_cache_and_index {");
2411 NearLabel resolved;
2412 const Register bytecode_in_cpcache = Z_R1_scratch;
2413 const int total_f1_offset = in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset());
2414 assert_different_registers(Rcache, cpe_offset, bytecode_in_cpcache);
2415
2416 Bytecodes::Code code = bytecode();
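  // The _nofast variants resolve exactly like the corresponding base bytecodes; map
  // them back so the resolved-check below compares against the value the constant
  // pool cache actually stores.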
2417 switch (code) {
2418 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2419 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
    default: break;
2420   }
2421
2422 {
2423 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2424 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, cpe_offset, bytecode_in_cpcache, byte_no, 1, index_size);
2425 // Have we resolved this bytecode?
2426 __ compare32_and_branch(bytecode_in_cpcache, (int)code, Assembler::bcondEqual, resolved);
2427 }
2428
2429 // Resolve first time through.
2430 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2431 __ load_const_optimized(Z_ARG2, (int) code);
2432 __ call_VM(noreg, entry, Z_ARG2);
2433
2434 // Update registers with resolved info.
2435 __ get_cache_and_index_at_bcp(Rcache, cpe_offset, 1, index_size);
2436 __ bind(resolved);
2437 BLOCK_COMMENT("} resolve_cache_and_index");
2438 }
2439
2440 // The Rcache and index registers must be set before call.
2441 // Index is already a byte offset, don't shift!
2442 void TemplateTable::load_field_cp_cache_entry(Register obj,
2443 Register cache,
2444 Register index,
2445 Register off,
2446 Register flags,
2447 bool is_static = false) {
2448 assert_different_registers(cache, index, flags, off);
2449 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2450
2451 // Field offset
2452 __ mem2reg_opt(off, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2453 // Flags. Must load 64 bits.
2454 __ mem2reg_opt(flags, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2455
2456 // klass overwrite register
2457 if (is_static) {
2458 __ mem2reg_opt(obj, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2459 __ mem2reg_opt(obj, Address(obj, Klass::java_mirror_offset()));
2460 __ resolve_oop_handle(obj);
2461 }
2462 }
2463
2464 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2465 Register method,
2466 Register itable_index,
2467 Register flags,
2468 bool is_invokevirtual,
2469 bool is_invokevfinal, // unused
2470 bool is_invokedynamic) {
2471 BLOCK_COMMENT("load_invoke_cp_cache_entry {");
2472 // Setup registers.
2473 const Register cache = Z_ARG1;
2474   const Register cpe_offset = flags;
2475 const ByteSize base_off = ConstantPoolCache::base_offset();
2476 const ByteSize f1_off = ConstantPoolCacheEntry::f1_offset();
2477 const ByteSize f2_off = ConstantPoolCacheEntry::f2_offset();
2478 const ByteSize flags_off = ConstantPoolCacheEntry::flags_offset();
2479 const int method_offset = in_bytes(base_off + ((byte_no == f2_byte) ? f2_off : f1_off));
2480 const int flags_offset = in_bytes(base_off + flags_off);
2481 // Access constant pool cache fields.
2482 const int index_offset = in_bytes(base_off + f2_off);
2483
2484 assert_different_registers(method, itable_index, flags, cache);
2485 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2486
2487 if (is_invokevfinal) {
2488 // Already resolved.
2489 assert(itable_index == noreg, "register not used");
2490 __ get_cache_and_index_at_bcp(cache, cpe_offset, 1);
2491 } else {
2492 // Need to resolve.
2493 resolve_cache_and_index(byte_no, cache, cpe_offset, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2494 }
2495 __ z_lg(method, Address(cache, cpe_offset, method_offset));
2496
2497 if (itable_index != noreg) {
2498 __ z_lg(itable_index, Address(cache, cpe_offset, index_offset));
2499 }
2500
2501 // Only load the lower 4 bytes and fill high bytes of flags with zeros.
2502 // Callers depend on this zero-extension!!!
2503 // Attention: overwrites cpe_offset == flags
2504 __ z_llgf(flags, Address(cache, cpe_offset, flags_offset + (BytesPerLong-BytesPerInt)));
2505
2506 BLOCK_COMMENT("} load_invoke_cp_cache_entry");
2507 }
2508
2509 // The registers cache and index are expected to be set before the call.
2510 // Correct values of the cache and index registers are preserved.
2511 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2512 bool is_static, bool has_tos) {
2513
2514 // Do the JVMTI work here to avoid disturbing the register state below.
2515   // We use the Z_ARG registers here because they are the argument registers
2516   // used for the call into the VM below.
2517 if (!JvmtiExport::can_post_field_access()) {
2518 return;
2519 }
2520
2521 // Check to see if a field access watch has been set before we
2522 // take the time to call into the VM.
2523 Label exit;
2524 assert_different_registers(cache, index, Z_tos);
2525 __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_access_count_addr());
2526 __ load_and_test_int(Z_R0, Address(Z_tos));
2527 __ z_brz(exit);
2528
2529 // Index is returned as byte offset, do not shift!
2530 __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1);
2531
2532 // cache entry pointer
2533 __ add2reg_with_index(Z_ARG3,
2534 in_bytes(ConstantPoolCache::base_offset()),
2535 Z_ARG3, Z_R1_scratch);
2536
2537 if (is_static) {
2538 __ clear_reg(Z_ARG2, true, false); // NULL object reference. Don't set CC.
2539 } else {
2540 __ mem2reg_opt(Z_ARG2, at_tos()); // Get object pointer without popping it.
2541 __ verify_oop(Z_ARG2);
2542 }
2543 // Z_ARG2: object pointer or NULL
2544 // Z_ARG3: cache entry pointer
2545 __ call_VM(noreg,
2546 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2547 Z_ARG2, Z_ARG3);
2548 __ get_cache_and_index_at_bcp(cache, index, 1);
2549
2550 __ bind(exit);
2551 }
2552
2553 void TemplateTable::pop_and_check_object(Register r) {
2554 __ pop_ptr(r);
2555 __ null_check(r); // for field access must check obj.
2556 __ verify_oop(r);
2557 }
2558
2559 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2560 transition(vtos, vtos);
2561
2562 const Register cache = Z_tmp_1;
2563 const Register index = Z_tmp_2;
2564 const Register obj = Z_tmp_1;
2565 const Register off = Z_ARG2;
2566 const Register flags = Z_ARG1;
2567 const Register bc = Z_tmp_1; // Uses same reg as obj, so don't mix them.
2568
2569 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2570 jvmti_post_field_access(cache, index, is_static, false);
2571 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2572
2573 if (!is_static) {
2574 // Obj is on the stack.
2575 pop_and_check_object(obj);
2576 }
2577
2578   // Displacement is 0, so any load instruction will be fine on any CPU.
2579 const Address field(obj, off);
2580
2581 Label is_Byte, is_Bool, is_Int, is_Short, is_Char,
2582 is_Long, is_Float, is_Object, is_Double;
2583 Label is_badState8, is_badState9, is_badStateA, is_badStateB,
2584 is_badStateC, is_badStateD, is_badStateE, is_badStateF,
2585 is_badState;
2586 Label branchTable, atosHandler, Done;
2587 Register br_tab = Z_R1_scratch;
2588 bool do_rewrite = !is_static && (rc == may_rewrite);
2589 bool dont_rewrite = (is_static || (rc == may_not_rewrite));
2590
2591 assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that");
2592 assert(btos == 0, "change code, btos != 0");
2593
2594 // Calculate branch table size. Generated code size depends on ASSERT and on bytecode rewriting.
2595 #ifdef ASSERT
2596 const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
2597 #else
2598 const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
2599 #endif
2600
2601 // Calculate address of branch table entry and branch there.
2602 {
2603 const int bit_shift = exact_log2(bsize); // Size of each branch table entry.
2604 const int r_bitpos = 63 - bit_shift;
2605 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
2606 const int n_rotate = (bit_shift-ConstantPoolCacheEntry::tos_state_shift);
2607 __ z_larl(br_tab, branchTable);
2608 __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true);
2609 }
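  // After the rotate-then-insert, flags holds tos_state * bsize, so the BC below
  // branches to the branch table slot matching the field's type.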
2610 __ z_bc(Assembler::bcondAlways, 0, flags, br_tab);
2611
2612 __ align_address(bsize);
2613 BIND(branchTable);
2614
2615 // btos
2616 BTB_BEGIN(is_Byte, bsize, "getfield_or_static:is_Byte");
2617 __ z_lb(Z_tos, field);
2618 __ push(btos);
2619 // Rewrite bytecode to be faster.
2620 if (do_rewrite) {
2621 patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5);
2622 }
2623 __ z_bru(Done);
2624 BTB_END(is_Byte, bsize, "getfield_or_static:is_Byte");
2625
2626 // ztos
2627 BTB_BEGIN(is_Bool, bsize, "getfield_or_static:is_Bool");
2628 __ z_lb(Z_tos, field);
2629 __ push(ztos);
2630 // Rewrite bytecode to be faster.
2631 if (do_rewrite) {
2632 // Use btos rewriting, no truncating to t/f bit is needed for getfield.
2633 patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5);
2634 }
2635 __ z_bru(Done);
2636 BTB_END(is_Bool, bsize, "getfield_or_static:is_Bool");
2637
2638 // ctos
2639 BTB_BEGIN(is_Char, bsize, "getfield_or_static:is_Char");
2640 // Load into 64 bits, works on all CPUs.
2641 __ z_llgh(Z_tos, field);
2642 __ push(ctos);
2643 // Rewrite bytecode to be faster.
2644 if (do_rewrite) {
2645 patch_bytecode(Bytecodes::_fast_cgetfield, bc, Z_ARG5);
2646 }
2647 __ z_bru(Done);
2648 BTB_END(is_Char, bsize, "getfield_or_static:is_Char");
2649
2650 // stos
2651 BTB_BEGIN(is_Short, bsize, "getfield_or_static:is_Short");
2652 __ z_lh(Z_tos, field);
2653 __ push(stos);
2654 // Rewrite bytecode to be faster.
2655 if (do_rewrite) {
2656 patch_bytecode(Bytecodes::_fast_sgetfield, bc, Z_ARG5);
2657 }
2658 __ z_bru(Done);
2659 BTB_END(is_Short, bsize, "getfield_or_static:is_Short");
2660
2661 // itos
2662 BTB_BEGIN(is_Int, bsize, "getfield_or_static:is_Int");
2663 __ mem2reg_opt(Z_tos, field, false);
2664 __ push(itos);
2665 // Rewrite bytecode to be faster.
2666 if (do_rewrite) {
2667 patch_bytecode(Bytecodes::_fast_igetfield, bc, Z_ARG5);
2668 }
2669 __ z_bru(Done);
2670 BTB_END(is_Int, bsize, "getfield_or_static:is_Int");
2671
2672 // ltos
2673 BTB_BEGIN(is_Long, bsize, "getfield_or_static:is_Long");
2674 __ mem2reg_opt(Z_tos, field);
2675 __ push(ltos);
2676 // Rewrite bytecode to be faster.
2677 if (do_rewrite) {
2678 patch_bytecode(Bytecodes::_fast_lgetfield, bc, Z_ARG5);
2679 }
2680 __ z_bru(Done);
2681 BTB_END(is_Long, bsize, "getfield_or_static:is_Long");
2682
2683 // ftos
2684 BTB_BEGIN(is_Float, bsize, "getfield_or_static:is_Float");
2685 __ mem2freg_opt(Z_ftos, field, false);
2686 __ push(ftos);
2687 // Rewrite bytecode to be faster.
2688 if (do_rewrite) {
2689 patch_bytecode(Bytecodes::_fast_fgetfield, bc, Z_ARG5);
2690 }
2691 __ z_bru(Done);
2692 BTB_END(is_Float, bsize, "getfield_or_static:is_Float");
2693
2694 // dtos
2695 BTB_BEGIN(is_Double, bsize, "getfield_or_static:is_Double");
2696 __ mem2freg_opt(Z_ftos, field);
2697 __ push(dtos);
2698 // Rewrite bytecode to be faster.
2699 if (do_rewrite) {
2700 patch_bytecode(Bytecodes::_fast_dgetfield, bc, Z_ARG5);
2701 }
2702 __ z_bru(Done);
2703 BTB_END(is_Double, bsize, "getfield_or_static:is_Double");
2704
2705 // atos
2706 BTB_BEGIN(is_Object, bsize, "getfield_or_static:is_Object");
2707 __ z_bru(atosHandler);
2708 BTB_END(is_Object, bsize, "getfield_or_static:is_Object");
2709
2710 // Bad state detection comes at no extra runtime cost.
2711 BTB_BEGIN(is_badState8, bsize, "getfield_or_static:is_badState8");
2712 __ z_illtrap();
2713 __ z_bru(is_badState);
2714 BTB_END( is_badState8, bsize, "getfield_or_static:is_badState8");
2715 BTB_BEGIN(is_badState9, bsize, "getfield_or_static:is_badState9");
2716 __ z_illtrap();
2717 __ z_bru(is_badState);
2718 BTB_END( is_badState9, bsize, "getfield_or_static:is_badState9");
2719 BTB_BEGIN(is_badStateA, bsize, "getfield_or_static:is_badStateA");
2720 __ z_illtrap();
2721 __ z_bru(is_badState);
2722 BTB_END( is_badStateA, bsize, "getfield_or_static:is_badStateA");
2723 BTB_BEGIN(is_badStateB, bsize, "getfield_or_static:is_badStateB");
2724 __ z_illtrap();
2725 __ z_bru(is_badState);
2726 BTB_END( is_badStateB, bsize, "getfield_or_static:is_badStateB");
2727 BTB_BEGIN(is_badStateC, bsize, "getfield_or_static:is_badStateC");
2728 __ z_illtrap();
2729 __ z_bru(is_badState);
2730 BTB_END( is_badStateC, bsize, "getfield_or_static:is_badStateC");
2731 BTB_BEGIN(is_badStateD, bsize, "getfield_or_static:is_badStateD");
2732 __ z_illtrap();
2733 __ z_bru(is_badState);
2734 BTB_END( is_badStateD, bsize, "getfield_or_static:is_badStateD");
2735 BTB_BEGIN(is_badStateE, bsize, "getfield_or_static:is_badStateE");
2736 __ z_illtrap();
2737 __ z_bru(is_badState);
2738 BTB_END( is_badStateE, bsize, "getfield_or_static:is_badStateE");
2739 BTB_BEGIN(is_badStateF, bsize, "getfield_or_static:is_badStateF");
2740 __ z_illtrap();
2741 __ z_bru(is_badState);
2742 BTB_END( is_badStateF, bsize, "getfield_or_static:is_badStateF");
2743
2744 __ align_address(64);
2745 BIND(is_badState); // Do this outside branch table. Needs a lot of space.
2746 {
2747 unsigned int b_off = __ offset();
2748 if (is_static) {
2749 __ stop_static("Bad state in getstatic");
2750 } else {
2751 __ stop_static("Bad state in getfield");
2752 }
2753 unsigned int e_off = __ offset();
2754 }
2755
2756 __ align_address(64);
2757 BIND(atosHandler); // Oops are really complicated to handle.
2758 // There is a lot of code generated.
2759 // Therefore: generate the handler outside of branch table.
2760 // There is no performance penalty. The additional branch
2761 // to here is compensated for by the fallthru to "Done".
2762 {
2763 unsigned int b_off = __ offset();
2764 do_oop_load(_masm, field, Z_tos, Z_tmp_2, Z_tmp_3, IN_HEAP);
2765 __ verify_oop(Z_tos);
2766 __ push(atos);
2767 if (do_rewrite) {
2768 patch_bytecode(Bytecodes::_fast_agetfield, bc, Z_ARG5);
2769 }
2770 unsigned int e_off = __ offset();
2771 }
2772
2773 BIND(Done);
2774 }
2775
2776 void TemplateTable::getfield(int byte_no) {
2777 BLOCK_COMMENT("getfield {");
2778 getfield_or_static(byte_no, false);
2779 BLOCK_COMMENT("} getfield");
2780 }
2781
2782 void TemplateTable::nofast_getfield(int byte_no) {
2783 getfield_or_static(byte_no, false, may_not_rewrite);
2784 }
2785
2786 void TemplateTable::getstatic(int byte_no) {
2787 BLOCK_COMMENT("getstatic {");
2788 getfield_or_static(byte_no, true);
2789 BLOCK_COMMENT("} getstatic");
2790 }
2791
2792 // The registers cache and index are expected to be set before the call. The
2793 // function may destroy various registers, just not the cache and
2794 // index registers.
2795 void TemplateTable::jvmti_post_field_mod(Register cache,
2796 Register index, bool is_static) {
2797 transition(vtos, vtos);
2798
2799 if (!JvmtiExport::can_post_field_modification()) {
2800 return;
2801 }
2802
2803 BLOCK_COMMENT("jvmti_post_field_mod {");
2804
2805 // Check to see if a field modification watch has been set before
2806 // we take the time to call into the VM.
2807 Label L1;
2808 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2809 assert_different_registers(cache, index, Z_tos);
2810
2811 __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_modification_count_addr());
2812 __ load_and_test_int(Z_R0, Address(Z_tos));
2813 __ z_brz(L1);
2814
2815 // Index is returned as byte offset, do not shift!
2816 __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1);
2817
2818 if (is_static) {
2819 // Life is simple. Null out the object pointer.
2820 __ clear_reg(Z_ARG2, true, false); // Don't set CC.
2821 } else {
2822 // Life is harder. The stack holds the value on top, followed by
2823 // the object. We don't know the size of the value, though. It
2824 // could be one or two words depending on its type. As a result,
2825 // we must find the type to determine where the object is.
2826 __ mem2reg_opt(Z_ARG4,
2827 Address(Z_ARG3, Z_R1_scratch,
2828 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()) +
2829 (BytesPerLong - BytesPerInt)),
2830 false);
2831 __ z_srl(Z_ARG4, ConstantPoolCacheEntry::tos_state_shift);
2832 // Make sure we don't need to mask Z_ARG4 for tos_state after the above shift.
2833 ConstantPoolCacheEntry::verify_tos_state_shift();
2834 __ mem2reg_opt(Z_ARG2, at_tos(1)); // Initially assume a one word jvalue.
2835
2836 NearLabel load_dtos, cont;
2837
2838 __ compareU32_and_branch(Z_ARG4, (intptr_t) ltos,
2839 Assembler::bcondNotEqual, load_dtos);
2840 __ mem2reg_opt(Z_ARG2, at_tos(2)); // ltos (two word jvalue)
2841 __ z_bru(cont);
2842
2843 __ bind(load_dtos);
2844 __ compareU32_and_branch(Z_ARG4, (intptr_t)dtos, Assembler::bcondNotEqual, cont);
2845 __ mem2reg_opt(Z_ARG2, at_tos(2)); // dtos (two word jvalue)
2846
2847 __ bind(cont);
2848 }
2849 // cache entry pointer
2850
2851 __ add2reg_with_index(Z_ARG3, in_bytes(cp_base_offset), Z_ARG3, Z_R1_scratch);
2852
2853 // object(tos)
2854 __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize));
2855 // Z_ARG2: object pointer set up above (NULL if static)
2856 // Z_ARG3: cache entry pointer
2857 // Z_ARG4: jvalue object on the stack
2858 __ call_VM(noreg,
2859 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2860 Z_ARG2, Z_ARG3, Z_ARG4);
2861 __ get_cache_and_index_at_bcp(cache, index, 1);
2862
2863 __ bind(L1);
2864 BLOCK_COMMENT("} jvmti_post_field_mod");
2865 }
2866
2867
2868 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2869 transition(vtos, vtos);
2870
2871 const Register cache = Z_tmp_1;
2872 const Register index = Z_ARG5;
2873 const Register obj = Z_tmp_1;
2874 const Register off = Z_tmp_2;
2875 const Register flags = Z_R1_scratch;
2876 const Register br_tab = Z_ARG5;
2877 const Register bc = Z_tmp_1;
2878 const Register oopStore_tmp1 = Z_R1_scratch;
2879 const Register oopStore_tmp2 = Z_ARG5;
2880 const Register oopStore_tmp3 = Z_R0_scratch;
2881
2882 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2883 jvmti_post_field_mod(cache, index, is_static);
2884 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2885 // begin of life for:
2886 // obj, off long life range
2887 // flags short life range, up to branch into branch table
2888 // end of life for:
2889 // cache, index
2890
2891 const Address field(obj, off);
2892 Label is_Byte, is_Bool, is_Int, is_Short, is_Char,
2893 is_Long, is_Float, is_Object, is_Double;
2894 Label is_badState8, is_badState9, is_badStateA, is_badStateB,
2895 is_badStateC, is_badStateD, is_badStateE, is_badStateF,
2896 is_badState;
2897 Label branchTable, atosHandler, Done;
2898 bool do_rewrite = !is_static && (rc == may_rewrite);
2899 bool dont_rewrite = (is_static || (rc == may_not_rewrite));
2900
2901 assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that");
2902
2903 assert(btos == 0, "change code, btos != 0");
2904
2905 #ifdef ASSERT
2906 const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
2907 #else
2908 const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*8;
2909 #endif
2910
2911 // Calculate address of branch table entry and branch there.
2912 {
2913 const int bit_shift = exact_log2(bsize); // Size of each branch table entry.
2914 const int r_bitpos = 63 - bit_shift;
2915 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
2916 const int n_rotate = (bit_shift-ConstantPoolCacheEntry::tos_state_shift);
2917 __ z_larl(br_tab, branchTable);
2918 __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true);
2919 __ z_bc(Assembler::bcondAlways, 0, flags, br_tab);
2920 }
2921 // end of life for:
2922 // flags, br_tab
2923
2924 __ align_address(bsize);
2925 BIND(branchTable);
2926
2927 // btos
2928 BTB_BEGIN(is_Byte, bsize, "putfield_or_static:is_Byte");
2929 __ pop(btos);
2930 if (!is_static) {
2931 pop_and_check_object(obj);
2932 }
2933 __ z_stc(Z_tos, field);
2934 if (do_rewrite) {
2935 patch_bytecode(Bytecodes::_fast_bputfield, bc, Z_ARG5, true, byte_no);
2936 }
2937 __ z_bru(Done);
2938 BTB_END( is_Byte, bsize, "putfield_or_static:is_Byte");
2939
2940 // ztos
2941 BTB_BEGIN(is_Bool, bsize, "putfield_or_static:is_Bool");
2942 __ pop(ztos);
2943 if (!is_static) {
2944 pop_and_check_object(obj);
2945 }
2946 __ z_nilf(Z_tos, 0x1);
2947 __ z_stc(Z_tos, field);
2948 if (do_rewrite) {
2949 patch_bytecode(Bytecodes::_fast_zputfield, bc, Z_ARG5, true, byte_no);
2950 }
2951 __ z_bru(Done);
2952 BTB_END(is_Bool, bsize, "putfield_or_static:is_Bool");
2953
2954 // ctos
2955 BTB_BEGIN(is_Char, bsize, "putfield_or_static:is_Char");
2956 __ pop(ctos);
2957 if (!is_static) {
2958 pop_and_check_object(obj);
2959 }
2960 __ z_sth(Z_tos, field);
2961 if (do_rewrite) {
2962 patch_bytecode(Bytecodes::_fast_cputfield, bc, Z_ARG5, true, byte_no);
2963 }
2964 __ z_bru(Done);
2965 BTB_END( is_Char, bsize, "putfield_or_static:is_Char");
2966
2967 // stos
2968 BTB_BEGIN(is_Short, bsize, "putfield_or_static:is_Short");
2969 __ pop(stos);
2970 if (!is_static) {
2971 pop_and_check_object(obj);
2972 }
2973 __ z_sth(Z_tos, field);
2974 if (do_rewrite) {
2975 patch_bytecode(Bytecodes::_fast_sputfield, bc, Z_ARG5, true, byte_no);
2976 }
2977 __ z_bru(Done);
2978 BTB_END( is_Short, bsize, "putfield_or_static:is_Short");
2979
2980 // itos
2981 BTB_BEGIN(is_Int, bsize, "putfield_or_static:is_Int");
2982 __ pop(itos);
2983 if (!is_static) {
2984 pop_and_check_object(obj);
2985 }
2986 __ reg2mem_opt(Z_tos, field, false);
2987 if (do_rewrite) {
2988 patch_bytecode(Bytecodes::_fast_iputfield, bc, Z_ARG5, true, byte_no);
2989 }
2990 __ z_bru(Done);
2991 BTB_END( is_Int, bsize, "putfield_or_static:is_Int");
2992
2993 // ltos
2994 BTB_BEGIN(is_Long, bsize, "putfield_or_static:is_Long");
2995 __ pop(ltos);
2996 if (!is_static) {
2997 pop_and_check_object(obj);
2998 }
2999 __ reg2mem_opt(Z_tos, field);
3000 if (do_rewrite) {
3001 patch_bytecode(Bytecodes::_fast_lputfield, bc, Z_ARG5, true, byte_no);
3002 }
3003 __ z_bru(Done);
3004 BTB_END( is_Long, bsize, "putfield_or_static:is_Long");
3005
3006 // ftos
3007 BTB_BEGIN(is_Float, bsize, "putfield_or_static:is_Float");
3008 __ pop(ftos);
3009 if (!is_static) {
3010 pop_and_check_object(obj);
3011 }
3012 __ freg2mem_opt(Z_ftos, field, false);
3013 if (do_rewrite) {
3014 patch_bytecode(Bytecodes::_fast_fputfield, bc, Z_ARG5, true, byte_no);
3015 }
3016 __ z_bru(Done);
3017 BTB_END( is_Float, bsize, "putfield_or_static:is_Float");
3018
3019 // dtos
3020 BTB_BEGIN(is_Double, bsize, "putfield_or_static:is_Double");
3021 __ pop(dtos);
3022 if (!is_static) {
3023 pop_and_check_object(obj);
3024 }
3025 __ freg2mem_opt(Z_ftos, field);
3026 if (do_rewrite) {
3027 patch_bytecode(Bytecodes::_fast_dputfield, bc, Z_ARG5, true, byte_no);
3028 }
3029 __ z_bru(Done);
3030 BTB_END( is_Double, bsize, "putfield_or_static:is_Double");
3031
3032 // atos
3033 BTB_BEGIN(is_Object, bsize, "putfield_or_static:is_Object");
3034 __ z_bru(atosHandler);
3035 BTB_END( is_Object, bsize, "putfield_or_static:is_Object");
3036
3037 // Bad state detection comes at no extra runtime cost.
3038 BTB_BEGIN(is_badState8, bsize, "putfield_or_static:is_badState8");
3039 __ z_illtrap();
3040 __ z_bru(is_badState);
3041 BTB_END( is_badState8, bsize, "putfield_or_static:is_badState8");
3042 BTB_BEGIN(is_badState9, bsize, "putfield_or_static:is_badState9");
3043 __ z_illtrap();
3044 __ z_bru(is_badState);
3045 BTB_END( is_badState9, bsize, "putfield_or_static:is_badState9");
3046 BTB_BEGIN(is_badStateA, bsize, "putfield_or_static:is_badStateA");
3047 __ z_illtrap();
3048 __ z_bru(is_badState);
3049 BTB_END( is_badStateA, bsize, "putfield_or_static:is_badStateA");
3050 BTB_BEGIN(is_badStateB, bsize, "putfield_or_static:is_badStateB");
3051 __ z_illtrap();
3052 __ z_bru(is_badState);
3053 BTB_END( is_badStateB, bsize, "putfield_or_static:is_badStateB");
3054 BTB_BEGIN(is_badStateC, bsize, "putfield_or_static:is_badStateC");
3055 __ z_illtrap();
3056 __ z_bru(is_badState);
3057 BTB_END( is_badStateC, bsize, "putfield_or_static:is_badStateC");
3058 BTB_BEGIN(is_badStateD, bsize, "putfield_or_static:is_badStateD");
3059 __ z_illtrap();
3060 __ z_bru(is_badState);
3061 BTB_END( is_badStateD, bsize, "putfield_or_static:is_badStateD");
3062 BTB_BEGIN(is_badStateE, bsize, "putfield_or_static:is_badStateE");
3063 __ z_illtrap();
3064 __ z_bru(is_badState);
3065 BTB_END( is_badStateE, bsize, "putfield_or_static:is_badStateE");
3066 BTB_BEGIN(is_badStateF, bsize, "putfield_or_static:is_badStateF");
3067 __ z_illtrap();
3068 __ z_bru(is_badState);
3069 BTB_END( is_badStateF, bsize, "putfield_or_static:is_badStateF");
3070
3071 __ align_address(64);
3072 BIND(is_badState); // Do this outside branch table. Needs a lot of space.
3073 {
3074 unsigned int b_off = __ offset();
3075 if (is_static) __ stop_static("Bad state in putstatic");
3076 else __ stop_static("Bad state in putfield");
3077 unsigned int e_off = __ offset();
3078 }
3079
3080 __ align_address(64);
3081 BIND(atosHandler); // Oops are really complicated to handle.
3082 // There is a lot of code generated.
3083 // Therefore: generate the handler outside of branch table.
3084 // There is no performance penalty. The additional branch
3085 // to here is compensated for by the fallthru to "Done".
3086 {
3087 unsigned int b_off = __ offset();
3088 __ pop(atos);
3089 if (!is_static) {
3090 pop_and_check_object(obj);
3091 }
3092 // Store into the field
3093 do_oop_store(_masm, Address(obj, off), Z_tos,
3094 oopStore_tmp1, oopStore_tmp2, oopStore_tmp3, IN_HEAP);
3095 if (do_rewrite) {
3096 patch_bytecode(Bytecodes::_fast_aputfield, bc, Z_ARG5, true, byte_no);
3097 }
3098 // __ z_bru(Done); // fallthru
3099 unsigned int e_off = __ offset();
3100 }
3101
3102 BIND(Done);
3103
3104 // Check for volatile store.
3105 Label notVolatile;
3106
3107 __ testbit(Z_ARG4, ConstantPoolCacheEntry::is_volatile_shift);
3108 __ z_brz(notVolatile);
3109 __ z_fence();
3110
3111 BIND(notVolatile);
3112 }
3113
3114 void TemplateTable::putfield(int byte_no) {
3115 BLOCK_COMMENT("putfield {");
3116 putfield_or_static(byte_no, false);
3117 BLOCK_COMMENT("} putfield");
3118 }
3119
3120 void TemplateTable::nofast_putfield(int byte_no) {
3121 putfield_or_static(byte_no, false, may_not_rewrite);
3122 }
3123
3124 void TemplateTable::putstatic(int byte_no) {
3125 BLOCK_COMMENT("putstatic {");
3126 putfield_or_static(byte_no, true);
3127 BLOCK_COMMENT("} putstatic");
3128 }
3129
3130 // Push the tos value back to the stack.
3131 // GC will find oops there and update them.
3132 void TemplateTable::jvmti_post_fast_field_mod() {
3133
3134 if (!JvmtiExport::can_post_field_modification()) {
3135 return;
3136 }
3137
3138 // Check to see if a field modification watch has been set before
3139 // we take the time to call into the VM.
3140 Label exit;
3141
3142 BLOCK_COMMENT("jvmti_post_fast_field_mod {");
3143
3144 __ load_absolute_address(Z_R1_scratch,
3145 (address) JvmtiExport::get_field_modification_count_addr());
3146 __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch));
3147 __ z_brz(exit);
3148
3149 Register obj = Z_tmp_1;
3150
3151 __ pop_ptr(obj); // Copy the object pointer from tos.
3152 __ verify_oop(obj);
3153 __ push_ptr(obj); // Put the object pointer back on tos.
3154
3155 // Save tos values before call_VM() clobbers them. Since we have
3156 // to do it for every data type, we use the saved values as the
3157 // jvalue object.
3158 switch (bytecode()) { // Load values into the jvalue object.
3159 case Bytecodes::_fast_aputfield:
3160 __ push_ptr(Z_tos);
3161 break;
3162 case Bytecodes::_fast_bputfield:
3163 case Bytecodes::_fast_zputfield:
3164 case Bytecodes::_fast_sputfield:
3165 case Bytecodes::_fast_cputfield:
3166 case Bytecodes::_fast_iputfield:
3167 __ push_i(Z_tos);
3168 break;
3169 case Bytecodes::_fast_dputfield:
3170 __ push_d();
3171 break;
3172 case Bytecodes::_fast_fputfield:
3173 __ push_f();
3174 break;
3175 case Bytecodes::_fast_lputfield:
3176 __ push_l(Z_tos);
3177 break;
3178
3179 default:
3180 ShouldNotReachHere();
3181 }
3182
3183 // jvalue on the stack
3184 __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize));
3185 // Access constant pool cache entry.
3186 __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tos, 1);
3187 __ verify_oop(obj);
3188
3189 // obj : object pointer copied above
3190 // Z_ARG3: cache entry pointer
3191 // Z_ARG4: jvalue object on the stack
3192 __ call_VM(noreg,
3193 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3194 obj, Z_ARG3, Z_ARG4);
3195
3196 switch (bytecode()) { // Restore tos values.
3197 case Bytecodes::_fast_aputfield:
3198 __ pop_ptr(Z_tos);
3199 break;
3200 case Bytecodes::_fast_bputfield:
3201 case Bytecodes::_fast_zputfield:
3202 case Bytecodes::_fast_sputfield:
3203 case Bytecodes::_fast_cputfield:
3204 case Bytecodes::_fast_iputfield:
3205 __ pop_i(Z_tos);
3206 break;
3207 case Bytecodes::_fast_dputfield:
3208 __ pop_d(Z_ftos);
3209 break;
3210 case Bytecodes::_fast_fputfield:
3211 __ pop_f(Z_ftos);
3212 break;
3213 case Bytecodes::_fast_lputfield:
3214 __ pop_l(Z_tos);
3215 break;
3216 }
3217
3218 __ bind(exit);
3219 BLOCK_COMMENT("} jvmti_post_fast_field_mod");
3220 }
3221
3222 void TemplateTable::fast_storefield(TosState state) {
3223 transition(state, vtos);
3224
3225 ByteSize base = ConstantPoolCache::base_offset();
3226 jvmti_post_fast_field_mod();
3227
3228 // Access constant pool cache.
3229 Register cache = Z_tmp_1;
3230 Register index = Z_tmp_2;
3231 Register flags = Z_ARG5;
3232
3233 // Index comes in bytes, don't shift afterwards!
3234 __ get_cache_and_index_at_bcp(cache, index, 1);
3235
3236 // Test for volatile.
3237 assert(!flags->is_volatile(), "do_oop_store could perform leaf RT call");
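  // The volatile check happens only after do_oop_store, which may issue a leaf
  // runtime call (GC write barrier); flags must therefore live in a call-preserved
  // register, which this assert verifies.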
3238 __ z_lg(flags, Address(cache, index, base + ConstantPoolCacheEntry::flags_offset()));
3239
3240 // Replace index with field offset from cache entry.
3241 Register field_offset = index;
3242 __ z_lg(field_offset, Address(cache, index, base + ConstantPoolCacheEntry::f2_offset()));
3243
3244 // Get object from stack.
3245 Register obj = cache;
3246
3247 pop_and_check_object(obj);
3248
3249 // field address
3250 const Address field(obj, field_offset);
3251
3252 // access field
3253 switch (bytecode()) {
3254 case Bytecodes::_fast_aputfield:
3255 do_oop_store(_masm, Address(obj, field_offset), Z_tos,
3256 Z_ARG2, Z_ARG3, Z_ARG4, IN_HEAP);
3257 break;
3258 case Bytecodes::_fast_lputfield:
3259 __ reg2mem_opt(Z_tos, field);
3260 break;
3261 case Bytecodes::_fast_iputfield:
3262 __ reg2mem_opt(Z_tos, field, false);
3263 break;
3264 case Bytecodes::_fast_zputfield:
3265 __ z_nilf(Z_tos, 0x1);
3266 // fall through to bputfield
3267 case Bytecodes::_fast_bputfield:
3268 __ z_stc(Z_tos, field);
3269 break;
3270 case Bytecodes::_fast_sputfield:
3271 // fall through
3272 case Bytecodes::_fast_cputfield:
3273 __ z_sth(Z_tos, field);
3274 break;
3275 case Bytecodes::_fast_fputfield:
3276 __ freg2mem_opt(Z_ftos, field, false);
3277 break;
3278 case Bytecodes::_fast_dputfield:
3279 __ freg2mem_opt(Z_ftos, field);
3280 break;
3281 default:
3282 ShouldNotReachHere();
3283 }
3284
3285 // Check for volatile store.
3286 Label notVolatile;
3287
3288 __ testbit(flags, ConstantPoolCacheEntry::is_volatile_shift);
3289 __ z_brz(notVolatile);
3290 __ z_fence();
3291
3292 __ bind(notVolatile);
3293 }
3294
3295 void TemplateTable::fast_accessfield(TosState state) {
3296 transition(atos, state);
3297
3298 Register obj = Z_tos;
3299
3300 // Do the JVMTI work here to avoid disturbing the register state below
3301 if (JvmtiExport::can_post_field_access()) {
3302 // Check to see if a field access watch has been set before we
3303 // take the time to call into the VM.
3304 Label cont;
3305
3306 __ load_absolute_address(Z_R1_scratch,
3307 (address)JvmtiExport::get_field_access_count_addr());
3308 __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch));
3309 __ z_brz(cont);
3310
3311 // Access constant pool cache entry.
3312
3313 __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tmp_1, 1);
3314 __ verify_oop(obj);
3315 __ push_ptr(obj); // Save object pointer before call_VM() clobbers it.
3316 __ z_lgr(Z_ARG2, obj);
3317
3318 // Z_ARG2: object pointer copied above
3319 // Z_ARG3: cache entry pointer
3320 __ call_VM(noreg,
3321 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3322 Z_ARG2, Z_ARG3);
3323 __ pop_ptr(obj); // Restore object pointer.
3324
3325 __ bind(cont);
3326 }
3327
3328 // Access constant pool cache.
3329 Register cache = Z_tmp_1;
3330 Register index = Z_tmp_2;
3331
3332 // Index comes in bytes, don't shift afterwards!
3333 __ get_cache_and_index_at_bcp(cache, index, 1);
3334 // Replace index with field offset from cache entry.
3335 __ mem2reg_opt(index,
3336 Address(cache, index,
3337 ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3338
3339 __ verify_oop(obj);
3340 __ null_check(obj);
3341
3342 Address field(obj, index);
3343
3344 // access field
3345 switch (bytecode()) {
3346 case Bytecodes::_fast_agetfield:
3347 do_oop_load(_masm, field, Z_tos, Z_tmp_1, Z_tmp_2, IN_HEAP);
3348 __ verify_oop(Z_tos);
3349 return;
3350 case Bytecodes::_fast_lgetfield:
3351 __ mem2reg_opt(Z_tos, field);
3352 return;
3353 case Bytecodes::_fast_igetfield:
3354 __ mem2reg_opt(Z_tos, field, false);
3355 return;
3356 case Bytecodes::_fast_bgetfield:
3357 __ z_lb(Z_tos, field);
3358 return;
3359 case Bytecodes::_fast_sgetfield:
3360 __ z_lh(Z_tos, field);
3361 return;
3362 case Bytecodes::_fast_cgetfield:
3363 __ z_llgh(Z_tos, field); // Load into 64 bits, works on all CPUs.
3364 return;
3365 case Bytecodes::_fast_fgetfield:
3366 __ mem2freg_opt(Z_ftos, field, false);
3367 return;
3368 case Bytecodes::_fast_dgetfield:
3369 __ mem2freg_opt(Z_ftos, field);
3370 return;
3371 default:
3372 ShouldNotReachHere();
3373 }
3374 }
3375
3376 void TemplateTable::fast_xaccess(TosState state) {
3377 transition(vtos, state);
3378
3379 Register receiver = Z_tos;
3380 // Get receiver.
3381 __ mem2reg_opt(Z_tos, aaddress(0));
3382
3383 // Access constant pool cache.
3384 Register cache = Z_tmp_1;
3385 Register index = Z_tmp_2;
3386
3387 // Index comes in bytes, don't shift afterwards!
3388 __ get_cache_and_index_at_bcp(cache, index, 2);
3389 // Replace index with field offset from cache entry.
3390 __ mem2reg_opt(index,
3391 Address(cache, index,
3392 ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3393
3394 // Make sure exception is reported in correct bcp range (getfield is
3395 // next instruction).
3396 __ add2reg(Z_bcp, 1);
3397 __ null_check(receiver);
3398 switch (state) {
3399 case itos:
3400 __ mem2reg_opt(Z_tos, Address(receiver, index), false);
3401 break;
3402 case atos:
3403 do_oop_load(_masm, Address(receiver, index), Z_tos, Z_tmp_1, Z_tmp_2, IN_HEAP);
3404 __ verify_oop(Z_tos);
3405 break;
3406 case ftos:
3407 __ mem2freg_opt(Z_ftos, Address(receiver, index));
3408 break;
3409 default:
3410 ShouldNotReachHere();
3411 }
3412
3413 // Reset bcp to original position.
3414 __ add2reg(Z_bcp, -1);
3415 }
3416
3417 //-----------------------------------------------------------------------------
3418 // Calls
3419
3420 void TemplateTable::prepare_invoke(int byte_no,
3421 Register method, // linked method (or i-klass)
3422 Register index, // itable index, MethodType, etc.
3423 Register recv, // If caller wants to see it.
3424 Register flags) { // If caller wants to test it.
3425 // Determine flags.
3426 const Bytecodes::Code code = bytecode();
3427 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3428 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
3429 const bool is_invokehandle = code == Bytecodes::_invokehandle;
3430 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
3431 const bool is_invokespecial = code == Bytecodes::_invokespecial;
3432 const bool load_receiver = (recv != noreg);
3433 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3434
3435 // Setup registers & access constant pool cache.
3436 if (recv == noreg) { recv = Z_ARG1; }
3437 if (flags == noreg) { flags = Z_ARG2; }
3438 assert_different_registers(method, Z_R14, index, recv, flags);
3439
3440 BLOCK_COMMENT("prepare_invoke {");
3441
3442 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3443
3444 // Maybe push appendix to arguments.
3445 if (is_invokedynamic || is_invokehandle) {
3446 Label L_no_push;
3447 Register resolved_reference = Z_R1_scratch;
3448 __ testbit(flags, ConstantPoolCacheEntry::has_appendix_shift);
3449 __ z_bfalse(L_no_push);
3450 // Push the appendix as a trailing parameter.
3451 // This must be done before we get the receiver,
3452 // since the parameter_size includes it.
3453 __ load_resolved_reference_at_index(resolved_reference, index);
3454 __ verify_oop(resolved_reference);
3455 __ push_ptr(resolved_reference); // Push appendix (MethodType, CallSite, etc.).
3456 __ bind(L_no_push);
3457 }
3458
3459 // Load receiver if needed (after appendix is pushed so parameter size is correct).
3460 if (load_receiver) {
3461 assert(!is_invokedynamic, "");
3462 // recv := int2long(flags & ConstantPoolCacheEntry::parameter_size_mask) << 3
3463 // Flags is zero-extended int2long when loaded during load_invoke_cp_cache_entry().
3464 // Only the least significant byte (psize) of flags is used.
3465 {
3466 const unsigned int logSES = Interpreter::logStackElementSize;
3467 const int bit_shift = logSES;
3468 const int r_bitpos = 63 - bit_shift;
3469 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::parameter_size_bits + 1;
3470 const int n_rotate = bit_shift;
3471 assert(ConstantPoolCacheEntry::parameter_size_mask == 255, "adapt bitpositions");
3472 __ rotate_then_insert(recv, flags, l_bitpos, r_bitpos, n_rotate, true);
3473 }
3474 // Recv now contains #arguments * StackElementSize.
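// Hedged worked example (assuming 8-byte stack slots, i.e. logStackElementSize == 3):
// bit_shift = 3, r_bitpos = 60, l_bitpos = 53, n_rotate = 3, so the
// rotate_then_insert above computes recv = (flags & 0xff) << 3, the parameter
// count (receiver included) scaled to a byte offset. The receiver is the
// parameter farthest from the stack top, hence the load from Z_esp + recv below.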
3475
3476 Address recv_addr(Z_esp, recv);
3477 __ z_lg(recv, recv_addr);
3478 __ verify_oop(recv);
3479 }
3480
3481 // Compute return type.
3482 // ret_type is used by callers (invokespecial, invokestatic) at least.
3483 Register ret_type = Z_R1_scratch;
3484 assert_different_registers(ret_type, method);
3485
3486 const address table_addr = (address)Interpreter::invoke_return_entry_table_for(code);
3487 __ load_absolute_address(Z_R14, table_addr);
3488
3489 {
3490 const int bit_shift = LogBytesPerWord; // Size of each table entry.
3491 const int r_bitpos = 63 - bit_shift;
3492 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
3493 const int n_rotate = bit_shift-ConstantPoolCacheEntry::tos_state_shift;
3494 __ rotate_then_insert(ret_type, flags, l_bitpos, r_bitpos, n_rotate, true);
3495 // Make sure we don't need to mask flags for tos_state after the above shift.
3496 ConstantPoolCacheEntry::verify_tos_state_shift();
3497 }
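// Hedged worked example (assuming tos_state_bits == 4, tos_state_shift == 28,
// and 8-byte table entries): the rotate lands the tos_state field of flags in
// bit positions 57..60, so ret_type == tos_state * wordSize, a ready-made byte
// offset into the return entry table loaded next.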
3498
3499 __ z_lg(Z_R14, Address(Z_R14, ret_type)); // Load return address.
3500 BLOCK_COMMENT("} prepare_invoke");
3501 }
3502
3503
3504 void TemplateTable::invokevirtual_helper(Register index,
3505 Register recv,
3506 Register flags) {
3507 // Uses temporary registers Z_tmp_2, Z_ARG4.
3508 assert_different_registers(index, recv, Z_tmp_2, Z_ARG4);
3509
3510 // Test for an invoke of a final method.
3511 Label notFinal;
3512
3513 BLOCK_COMMENT("invokevirtual_helper {");
3514
3515 __ testbit(flags, ConstantPoolCacheEntry::is_vfinal_shift);
3516 __ z_brz(notFinal);
3517
3518 const Register method = index; // Method must be Z_ARG3.
3519 assert(method == Z_ARG3, "method must be second argument for interpreter calling convention");
3520
3521 // Do the call - the index is actually the method to call.
3522 // That is, f2 is a vtable index if !is_vfinal, else f2 is a method.
3523
3524 // It's final, need a null check here!
3525 __ null_check(recv);
3526
3527 // Profile this call.
3528 __ profile_final_call(Z_tmp_2);
3529 __ profile_arguments_type(Z_tmp_2, method, Z_ARG5, true); // Argument type profiling.
3530 __ jump_from_interpreted(method, Z_tmp_2);
3531
3532 __ bind(notFinal);
3533
3534 // Get receiver klass.
3535 __ null_check(recv, Z_R0_scratch, oopDesc::klass_offset_in_bytes());
3536 __ load_klass(Z_tmp_2, recv);
3537
3538 // Profile this call.
3539 __ profile_virtual_call(Z_tmp_2, Z_ARG4, Z_ARG5);
3540
3541 // Get target method & entry point.
3542 __ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes()));
3543 __ mem2reg_opt(method,
3544 Address(Z_tmp_2, index,
3545 Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
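// In effect: method = receiver_klass->vtable_entry[index].method(). The index was
// scaled to a byte offset above; this sketch assumes each vtableEntry holds a
// single Method* (wordSize bytes), which is what
// exact_log2(vtableEntry::size_in_bytes()) relies on.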
3546 __ profile_arguments_type(Z_ARG4, method, Z_ARG5, true);
3547 __ jump_from_interpreted(method, Z_ARG4);
3548 BLOCK_COMMENT("} invokevirtual_helper");
3549 }
3550
3551 void TemplateTable::invokevirtual(int byte_no) {
3552 transition(vtos, vtos);
3553
3554 assert(byte_no == f2_byte, "use this argument");
3555 prepare_invoke(byte_no,
3556 Z_ARG3, // method or vtable index
3557 noreg, // unused itable index
3558 Z_ARG1, // recv
3559 Z_ARG2); // flags
3560
3561 // Z_ARG3 : index
3562 // Z_ARG1 : receiver
3563 // Z_ARG2 : flags
3564 invokevirtual_helper(Z_ARG3, Z_ARG1, Z_ARG2);
3565 }
3566
3567 void TemplateTable::invokespecial(int byte_no) {
3568 transition(vtos, vtos);
3569
3570 assert(byte_no == f1_byte, "use this argument");
3571 Register Rmethod = Z_tmp_2;
3572 prepare_invoke(byte_no, Rmethod, noreg, // Get f1 method.
3573 Z_ARG3); // Get receiver also for null check.
3574 __ verify_oop(Z_ARG3);
3575 __ null_check(Z_ARG3);
3576 // Do the call.
3577 __ profile_call(Z_ARG2);
3578 __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
3579 __ jump_from_interpreted(Rmethod, Z_R1_scratch);
3580 }
3581
3582 void TemplateTable::invokestatic(int byte_no) {
3583 transition(vtos, vtos);
3584
3585 assert(byte_no == f1_byte, "use this argument");
3586 Register Rmethod = Z_tmp_2;
3587 prepare_invoke(byte_no, Rmethod); // Get f1 method.
3588 // Do the call.
3589 __ profile_call(Z_ARG2);
3590 __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
3591 __ jump_from_interpreted(Rmethod, Z_R1_scratch);
3592 }
3593
3594 // Outdated feature, and we don't support it.
3595 void TemplateTable::fast_invokevfinal(int byte_no) {
3596 transition(vtos, vtos);
3597 assert(byte_no == f2_byte, "use this argument");
3598 __ stop("fast_invokevfinal not used on linuxs390x");
3599 }
3600
3601 void TemplateTable::invokeinterface(int byte_no) {
3602 transition(vtos, vtos);
3603
3604 assert(byte_no == f1_byte, "use this argument");
3605 Register klass = Z_ARG2,
3606 method = Z_ARG3,
3607 interface = Z_ARG4,
3608 flags = Z_ARG5,
3609 receiver = Z_tmp_1;
3610
3611 BLOCK_COMMENT("invokeinterface {");
3612
3613 prepare_invoke(byte_no, interface, method, // Get f1 klassOop, f2 Method*.
3614 receiver, flags);
3615
3616 // Z_R14 (== Z_bytecode) : return entry
3617
3618 // First check for Object case, then private interface method,
3619 // then regular interface method.
3620
3621 // Special case of invokeinterface called for virtual method of
3622 // java.lang.Object. See cpCache.cpp for details.
3623 NearLabel notObjectMethod, no_such_method;
3624 __ testbit(flags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3625 __ z_brz(notObjectMethod);
3626 invokevirtual_helper(method, receiver, flags);
3627 __ bind(notObjectMethod);
3628
3629 // Check for private method invocation - indicated by vfinal
3630 NearLabel notVFinal;
3631 __ testbit(flags, ConstantPoolCacheEntry::is_vfinal_shift);
3632 __ z_brz(notVFinal);
3633
3634 // Get receiver klass into klass - also a null check.
3635 __ load_klass(klass, receiver);
3636
3637 NearLabel subtype, no_such_interface;
3638
3639 __ check_klass_subtype(klass, interface, Z_tmp_2, flags/*scratch*/, subtype);
3640 // If we get here the typecheck failed
3641 __ z_bru(no_such_interface);
3642 __ bind(subtype);
3643
3644 // do the call
3645 __ profile_final_call(Z_tmp_2);
3646 __ profile_arguments_type(Z_tmp_2, method, Z_ARG5, true);
3647 __ jump_from_interpreted(method, Z_tmp_2);
3648
3649 __ bind(notVFinal);
3650
3651 // Get receiver klass into klass - also a null check.
3652 __ load_klass(klass, receiver);
3653
3654 __ lookup_interface_method(klass, interface, noreg, noreg, /*temp*/Z_ARG1,
3655 no_such_interface, /*return_method=*/false);
3656
3657 // Profile this call.
3658 __ profile_virtual_call(klass, Z_ARG1/*mdp*/, flags/*scratch*/);
3659
3660 // Find entry point to call.
3661
3662 // Get declaring interface class from method
3663 __ z_lg(interface, Address(method, Method::const_offset()));
3664 __ z_lg(interface, Address(interface, ConstMethod::constants_offset()));
3665 __ z_lg(interface, Address(interface, ConstantPool::pool_holder_offset_in_bytes()));
3666
3667 // Get itable index from method
3668 Register index = receiver,
3669 method2 = flags;
3670 __ z_lgf(index, Address(method, Method::itable_index_offset()));
3671 __ z_aghi(index, -Method::itable_index_max);
3672 __ z_lcgr(index, index);
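// The Method* stores its itable index in an encoded form (roughly
// stored == itable_index_max - index, see Method::itable_index()); subtracting
// itable_index_max and taking the two's complement (z_lcgr) recovers the plain
// itable index used for the lookup below.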
3673
3674 __ lookup_interface_method(klass, interface, index, method2, Z_tmp_2,
3675 no_such_interface);
3676
3677 // Check for abstract method error.
3678 // Note: This should be done more efficiently via a throw_abstract_method_error
3679 // interpreter entry point and a conditional jump to it in case of a null
3680 // method.
3681 __ compareU64_and_branch(method2, (intptr_t) 0,
3682 Assembler::bcondZero, no_such_method);
3683
3684 __ profile_arguments_type(Z_tmp_1, method2, Z_tmp_2, true);
3685
3686 // Do the call.
3687 __ jump_from_interpreted(method2, Z_tmp_2);
3688 __ should_not_reach_here();
3689
3690 // exception handling code follows...
3691 // Note: Must restore interpreter registers to canonical
3692 // state for exception handling to work correctly!
3693
3694 __ bind(no_such_method);
3695
3696 // Throw exception.
3697 // Pass arguments for generating a verbose error message.
3698 __ z_lgr(Z_tmp_1, method); // Prevent register clash.
3699 __ call_VM(noreg,
3700 CAST_FROM_FN_PTR(address,
3701 InterpreterRuntime::throw_AbstractMethodErrorVerbose),
3702 klass, Z_tmp_1);
3703 // The call_VM checks for exception, so we should never return here.
3704 __ should_not_reach_here();
3705
3706 __ bind(no_such_interface);
3707
3708 // Throw exception.
3709 // Pass arguments for generating a verbose error message.
3710 __ call_VM(noreg,
3711 CAST_FROM_FN_PTR(address,
3712 InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
3713 klass, interface);
3714 // The call_VM checks for exception, so we should never return here.
3715 __ should_not_reach_here();
3716
3717 BLOCK_COMMENT("} invokeinterface");
3718 return;
3719 }
3720
3721 void TemplateTable::invokehandle(int byte_no) {
3722 transition(vtos, vtos);
3723
3724 const Register method = Z_tmp_2;
3725 const Register recv = Z_ARG5;
3726 const Register mtype = Z_tmp_1;
3727 prepare_invoke(byte_no,
3728 method, mtype, // Get f2 method, f1 MethodType.
3729 recv);
3730 __ verify_method_ptr(method);
3731 __ verify_oop(recv);
3732 __ null_check(recv);
3733
3734 // Note: Mtype is already pushed (if necessary) by prepare_invoke.
3735
3736 // FIXME: profile the LambdaForm also.
3737 __ profile_final_call(Z_ARG2);
3738 __ profile_arguments_type(Z_ARG3, method, Z_ARG5, true);
3739
3740 __ jump_from_interpreted(method, Z_ARG3);
3741 }
3742
3743 void TemplateTable::invokedynamic(int byte_no) {
3744 transition(vtos, vtos);
3745
3746 const Register Rmethod = Z_tmp_2;
3747 const Register Rcallsite = Z_tmp_1;
3748
3749 prepare_invoke(byte_no, Rmethod, Rcallsite);
3750
3751 // Rmethod: CallSite object (from f1)
3752 // Rcallsite: MH.linkToCallSite method (from f2)
3753
3754 // Note: Callsite is already pushed by prepare_invoke.
3755
3756 // TODO: should make a type profile for any invokedynamic that takes a ref argument.
3757 // Profile this call.
3758 __ profile_call(Z_ARG2);
3759 __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
3760 __ jump_from_interpreted(Rmethod, Z_ARG2);
3761 }
3762
3763 //-----------------------------------------------------------------------------
3764 // Allocation
3765
3766 // Original comment on "allow_shared_alloc":
3767 // Always go the slow path.
3768 // + Eliminated optimization within the template-based interpreter:
3769 // If an allocation is done within the interpreter without using
3770 // tlabs, the interpreter tries to do the allocation directly
3771 // on the heap.
3772 // + That means the profiling hooks are not considered and allocations
3773 // get lost for the profiling framework.
3774 // + However, we do not think that this optimization is really needed,
3775 // so we now always take the slow path through the VM in this case --
3776 // spec jbb2005 shows no measurable performance degradation.
3777 void TemplateTable::_new() {
3778 transition(vtos, atos);
3779 address prev_instr_address = NULL;
3780 Register tags = Z_tmp_1;
3781 Register RallocatedObject = Z_tos;
3782 Register cpool = Z_ARG2;
3783 Register tmp = Z_ARG3; // RobjectFields==tmp and Rsize==offset must be a register pair.
3784 Register offset = Z_ARG4;
3785 Label slow_case;
3786 Label done;
3787 Label initialize_header;
3788 Label allocate_shared;
3789
3790 BLOCK_COMMENT("TemplateTable::_new {");
3791 __ get_2_byte_integer_at_bcp(offset/*dest*/, 1, InterpreterMacroAssembler::Unsigned);
3792 __ get_cpool_and_tags(cpool, tags);
3793 // Make sure the class we're about to instantiate has been resolved.
3794 // This is done before loading InstanceKlass to be consistent with the order
3795 // how Constant Pool is updated (see ConstantPool::klass_at_put).
3796 const int tags_offset = Array<u1>::base_offset_in_bytes();
3797 __ load_address(tmp, Address(tags, offset, tags_offset));
3798 __ z_cli(0, tmp, JVM_CONSTANT_Class);
3799 __ z_brne(slow_case);
3800
3801 __ z_sllg(offset, offset, LogBytesPerWord); // Convert index to offset.
3802 // Get InstanceKlass.
3803 Register iklass = cpool;
3804 __ load_resolved_klass_at_offset(cpool, offset, iklass);
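// load_resolved_klass_at_offset() reads the resolved-klass slot number from the
// constant pool entry at 'offset' and then fetches the InstanceKlass* from the
// pool's resolved_klasses array. Note that iklass aliases cpool, which is no
// longer needed afterwards.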
3805
3806 // Make sure klass is initialized & doesn't have finalizer.
3807 // Make sure klass is fully initialized.
3808 const int state_offset = in_bytes(InstanceKlass::init_state_offset());
3809 if (Immediate::is_uimm12(state_offset)) {
3810 __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized);
3811 } else {
3812 __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
3813 }
3814 __ z_brne(slow_case);
3815
3816 // Get instance_size in InstanceKlass (scaled to a count of bytes).
3817 Register Rsize = offset;
3818 __ z_llgf(Rsize, Address(iklass, Klass::layout_helper_offset()));
3819 __ z_tmll(Rsize, Klass::_lh_instance_slow_path_bit);
3820 __ z_btrue(slow_case);
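// For instance klasses the layout helper is the instance size in bytes, with
// _lh_instance_slow_path_bit set when allocation must not be inlined (e.g. the
// class has a finalizer); in that case we branched to slow_case above.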
3821
3822 // Allocate the instance
3823 // 1) Try to allocate in the TLAB.
3824 // 2) If the above fails (or is not applicable), go to a slow case
3825 // (creates a new TLAB, etc.).
3826 // Note: compared to other architectures, s390's implementation always goes
3827 // to the slow path if TLAB is used and fails.
3828 if (UseTLAB) {
3829 Register RoldTopValue = RallocatedObject;
3830 Register RnewTopValue = tmp;
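// Classic bump-pointer allocation in the TLAB (a sketch of the idea):
//   new_top = old_top + instance_size;
//   if (new_top > tlab_end) goto slow_case;   // unsigned compare, branch on high
//   tlab_top = new_top;                       // old_top is the new object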
3831 __ z_lg(RoldTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));
3832 __ load_address(RnewTopValue, Address(RoldTopValue, Rsize));
3833 __ z_cg(RnewTopValue, Address(Z_thread, JavaThread::tlab_end_offset()));
3834 __ z_brh(slow_case);
3835 __ z_stg(RnewTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));
3836
3837 Register RobjectFields = tmp;
3838 Register Rzero = Z_R1_scratch;
3839 __ clear_reg(Rzero, true /*whole reg*/, false); // Load 0L into Rzero. Don't set CC.
3840
3841 if (!ZeroTLAB) {
3842 // The object is initialized before the header. If the object size is
3843 // zero, go directly to the header initialization.
3844 __ z_aghi(Rsize, (int)-sizeof(oopDesc)); // Subtract header size, set CC.
3845 __ z_bre(initialize_header); // Jump if size of fields is zero.
3846
3847 // Initialize object fields.
3848 // See documentation for MVCLE instruction!!!
3849 assert(RobjectFields->encoding() % 2 == 0, "RobjectFields must be an even register");
3850 assert(Rsize->encoding() == (RobjectFields->encoding()+1),
3851 "RobjectFields and Rsize must be a register pair");
3852 assert(Rzero->encoding() % 2 == 1, "Rzero must be an odd register");
3853
3854 // Set Rzero to 0 and use it as src length, then mvcle will copy nothing
3855 // and fill the object with the padding value 0.
3856 __ add2reg(RobjectFields, sizeof(oopDesc), RallocatedObject);
3857 __ move_long_ext(RobjectFields, as_Register(Rzero->encoding() - 1), 0);
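// MVCLE copies from the (Rzero-1, Rzero) even/odd register pair into the
// (RobjectFields, Rsize) pair. With the source length in Rzero being 0, nothing
// is copied and the entire destination range is filled with the pad byte 0,
// zeroing all instance fields in a single instruction.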
3858 }
3859
3860 // Initialize object header only.
3861 __ bind(initialize_header);
3862 if (UseBiasedLocking) {
3863 Register prototype = RobjectFields;
3864 __ z_lg(prototype, Address(iklass, Klass::prototype_header_offset()));
3865 __ z_stg(prototype, Address(RallocatedObject, oopDesc::mark_offset_in_bytes()));
3866 } else {
3867 __ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
3868 (long)markOopDesc::prototype());
3869 }
3870
3871 __ store_klass_gap(Rzero, RallocatedObject); // Zero klass gap for compressed oops.
3872 __ store_klass(iklass, RallocatedObject); // Store klass last.
3873
3874 {
3875 SkipIfEqual skip(_masm, &DTraceAllocProbes, false, Z_ARG5 /*scratch*/);
3876 // Trigger dtrace event for fastpath.
3877 __ push(atos); // Save the return value.
3878 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), RallocatedObject);
3879 __ pop(atos); // Restore the return value.
3880 }
3881 __ z_bru(done);
3882 }
3883
3884 // slow case
3885 __ bind(slow_case);
3886 __ get_constant_pool(Z_ARG2);
3887 __ get_2_byte_integer_at_bcp(Z_ARG3/*dest*/, 1, InterpreterMacroAssembler::Unsigned);
3888 call_VM(Z_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Z_ARG2, Z_ARG3);
3889 __ verify_oop(Z_tos);
3890
3891 // continue
3892 __ bind(done);
3893
3894 BLOCK_COMMENT("} TemplateTable::_new");
3895 }
3896
3897 void TemplateTable::newarray() {
3898 transition(itos, atos);
3899
3900 // Call runtime.
3901 __ z_llgc(Z_ARG2, at_bcp(1)); // type
3902 __ z_lgfr(Z_ARG3, Z_tos); // size
3903 call_VM(Z_RET,
3904 CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3905 Z_ARG2, Z_ARG3);
3906 }
3907
3908 void TemplateTable::anewarray() {
3909 transition(itos, atos);
3910 __ get_2_byte_integer_at_bcp(Z_ARG3, 1, InterpreterMacroAssembler::Unsigned);
3911 __ get_constant_pool(Z_ARG2);
3912 __ z_lgfr(Z_ARG4, Z_tos);
3913 call_VM(Z_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3914 Z_ARG2, Z_ARG3, Z_ARG4);
3915 }
3916
3917 void TemplateTable::arraylength() {
3918 transition(atos, itos);
3919
3920 int offset = arrayOopDesc::length_offset_in_bytes();
3921
3922 __ null_check(Z_tos, Z_R0_scratch, offset);
3923 __ mem2reg_opt(Z_tos, Address(Z_tos, offset), false);
3924 }
3925
3926 void TemplateTable::checkcast() {
3927 transition(atos, atos);
3928
3929 NearLabel done, is_null, ok_is_subtype, quicked, resolved;
3930
3931 BLOCK_COMMENT("checkcast {");
3932 // If object is NULL, we are almost done.
3933 __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);
3934
3935 // Get cpool & tags index.
3936 Register cpool = Z_tmp_1;
3937 Register tags = Z_tmp_2;
3938 Register index = Z_ARG5;
3939
3940 __ get_cpool_and_tags(cpool, tags);
3941 __ get_2_byte_integer_at_bcp(index, 1, InterpreterMacroAssembler::Unsigned);
3942 // See if bytecode has already been quicked.
3943 // Note: For CLI, we would have to add the index to the tags pointer first,
3944 // thus load and compare in a "classic" manner.
3945 __ z_llgc(Z_R0_scratch,
3946 Address(tags, index, Array<u1>::base_offset_in_bytes()));
3947 __ compareU64_and_branch(Z_R0_scratch, JVM_CONSTANT_Class,
3948 Assembler::bcondEqual, quicked);
3949
3950 __ push(atos); // Save receiver for result, and for GC.
3951 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3952 __ get_vm_result_2(Z_tos);
3953
3954 Register receiver = Z_ARG4;
3955 Register klass = Z_tos;
3956 Register subklass = Z_ARG5;
3957
3958 __ pop_ptr(receiver); // restore receiver
3959 __ z_bru(resolved);
3960
3961 // Get superklass in klass and subklass in subklass.
3962 __ bind(quicked);
3963
3964 __ z_lgr(Z_ARG4, Z_tos); // Save receiver.
3965 __ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
3966 __ load_resolved_klass_at_offset(cpool, index, klass);
3967
3968 __ bind(resolved);
3969
3970 __ load_klass(subklass, receiver);
3971
3972 // Generate subtype check. Object in receiver.
3973 // Superklass in klass. Subklass in subklass.
3974 __ gen_subtype_check(subklass, klass, Z_ARG3, Z_tmp_1, ok_is_subtype);
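// gen_subtype_check() branches to ok_is_subtype on success and falls through on
// failure; internally it performs HotSpot's usual fast/slow subtype test
// (supertype display check, then a scan of the secondary supers), which is why
// the failure path follows immediately below.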
3975
3976 // Come here on failure.
3977 __ push_ptr(receiver);
3978 // Object is at TOS, target klass oop expected in rax by convention.
3979 __ z_brul((address) Interpreter::_throw_ClassCastException_entry);
3980
3981 // Come here on success.
3982 __ bind(ok_is_subtype);
3983
3984 __ z_lgr(Z_tos, receiver); // Restore object.
3985
3986 // Collect counts on whether this test sees NULLs a lot or not.
3987 if (ProfileInterpreter) {
3988 __ z_bru(done);
3989 __ bind(is_null);
3990 __ profile_null_seen(Z_tmp_1);
3991 } else {
3992 __ bind(is_null); // Same as 'done'.
3993 }
3994
3995 __ bind(done);
3996 BLOCK_COMMENT("} checkcast");
3997 }
3998
3999 void TemplateTable::instanceof() {
4000 transition(atos, itos);
4001
4002 NearLabel done, is_null, ok_is_subtype, quicked, resolved;
4003
4004 BLOCK_COMMENT("instanceof {");
4005 // If object is NULL, we are almost done.
4006 __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);
4007
4008 // Get cpool & tags index.
4009 Register cpool = Z_tmp_1;
4010 Register tags = Z_tmp_2;
4011 Register index = Z_ARG5;
4012
4013 __ get_cpool_and_tags(cpool, tags);
4014 __ get_2_byte_integer_at_bcp(index, 1, InterpreterMacroAssembler::Unsigned);
4015 // See if bytecode has already been quicked.
4016 // Note: For CLI, we would have to add the index to the tags pointer first,
4017 // thus load and compare in a "classic" manner.
4018 __ z_llgc(Z_R0_scratch,
4019 Address(tags, index, Array<u1>::base_offset_in_bytes()));
4020 __ compareU64_and_branch(Z_R0_scratch, JVM_CONSTANT_Class, Assembler::bcondEqual, quicked);
4021
4022 __ push(atos); // Save receiver for result, and for GC.
4023 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4024 __ get_vm_result_2(Z_tos);
4025
4026 Register receiver = Z_tmp_2;
4027 Register klass = Z_tos;
4028 Register subklass = Z_tmp_2;
4029
4030 __ pop_ptr(receiver); // Restore receiver.
4031 __ verify_oop(receiver);
4032 __ load_klass(subklass, subklass);
4033 __ z_bru(resolved);
4034
4035 // Get superklass in klass and subklass in subklass.
4036 __ bind(quicked);
4037
4038 __ load_klass(subklass, Z_tos);
4039 __ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
4040 __ load_resolved_klass_at_offset(cpool, index, klass);
4041
4042 __ bind(resolved);
4043
4044 // Generate subtype check.
4045 // Superklass in klass. Subklass in subklass.
4046 __ gen_subtype_check(subklass, klass, Z_ARG4, Z_ARG5, ok_is_subtype);
4047
4048 // Come here on failure.
4049 __ clear_reg(Z_tos, true, false);
4050 __ z_bru(done);
4051
4052 // Come here on success.
4053 __ bind(ok_is_subtype);
4054 __ load_const_optimized(Z_tos, 1);
4055
4056 // Collect counts on whether this test sees NULLs a lot or not.
4057 if (ProfileInterpreter) {
4058 __ z_bru(done);
4059 __ bind(is_null);
4060 __ profile_null_seen(Z_tmp_1);
4061 } else {
4062 __ bind(is_null); // same as 'done'
4063 }
4064
4065 __ bind(done);
4066 // tos = 0: obj == NULL or obj is not an instanceof the specified klass
4067 // tos = 1: obj != NULL and obj is an instanceof the specified klass
4068 BLOCK_COMMENT("} instanceof");
4069 }
4070
4071 //-----------------------------------------------------------------------------
4072 // Breakpoints
4073 void TemplateTable::_breakpoint() {
4074
4075 // Note: We get here even if we are single stepping.
4076 // Jbug insists on setting breakpoints at every bytecode
4077 // even if we are in single step mode.
4078
4079 transition(vtos, vtos);
4080
4081 // Get the unpatched byte code.
4082 __ get_method(Z_ARG2);
4083 __ call_VM(noreg,
4084 CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at),
4085 Z_ARG2, Z_bcp);
4086 // Save the result to a register that is preserved over C-function calls.
4087 __ z_lgr(Z_tmp_1, Z_RET);
4088
4089 // Post the breakpoint event.
4090 __ get_method(Z_ARG2);
4091 __ call_VM(noreg,
4092 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4093 Z_ARG2, Z_bcp);
4094
4095 // Must restore the bytecode, because call_VM destroys Z_bytecode.
4096 __ z_lgr(Z_bytecode, Z_tmp_1);
4097
4098 // Complete the execution of original bytecode.
4099 __ dispatch_only_normal(vtos);
4100 }
4101
4102
4103 // Exceptions
4104
4105 void TemplateTable::athrow() {
4106 transition(atos, vtos);
4107 __ null_check(Z_tos);
4108 __ load_absolute_address(Z_ARG2, Interpreter::throw_exception_entry());
4109 __ z_br(Z_ARG2);
4110 }
4111
4112 // Synchronization
4113 //
4114 // Note: monitorenter & exit are symmetric routines, which is reflected
4115 // in the assembly code structure as well
4116 //
4117 // Stack layout:
4118 //
4119 //              callers_sp       <- Z_SP (callers_sp == Z_fp (own fp))
4120 //              return_pc
4121 //              [rest of ABI_160]
4122 //             /slot o:   free
4123 //            / ...       free
4124 //      oper. | slot n+1: free   <- Z_esp points to first free slot
4125 //      stack | slot n:   val    caches IJAVA_STATE.esp
4126 //            | ...
4127 //             \slot 0:   val
4128 //             /slot m           <- IJAVA_STATE.monitors = monitor block top
4129 //            | ...
4130 //    monitors| slot 2
4131 //            | slot 1
4132 //             \slot 0
4133 //             /slot l           <- monitor block bot
4134 // ijava_state| ...
4135 //            | slot 2
4136 //             \slot 0
4137 //                               <- Z_fp
4138 void TemplateTable::monitorenter() {
4139 transition(atos, vtos);
4140
4141 BLOCK_COMMENT("monitorenter {");
4142
4143 // Check for NULL object.
4144 __ null_check(Z_tos);
4145 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4146 NearLabel allocated;
4147 // Initialize entry pointer.
4148 const Register Rfree_slot = Z_tmp_1;
4149 __ clear_reg(Rfree_slot, true, false); // Points to free slot or NULL. Don't set CC.
4150
4151 // Find a free slot in the monitor block from top to bot (result in Rfree_slot).
4152 {
4153 const Register Rcurr_monitor = Z_ARG2;
4154 const Register Rbot = Z_ARG3; // Points to word under bottom of monitor block.
4155 const Register Rlocked_obj = Z_ARG4;
4156 NearLabel loop, exit, not_free;
4157 // Starting with top-most entry.
4158 __ get_monitors(Rcurr_monitor); // Rcur_monitor = IJAVA_STATE.monitors
4159 __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);
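// The monitor block sits just below the fixed ijava_state area (at lower
// addresses) and grows downward toward the operand stack, so Rbot =
// Z_fp - z_ijava_state_size marks its high-address boundary ("bottom"), while
// IJAVA_STATE.monitors points to the most recently added (top-most) entry.
// See the stack layout sketch above monitorenter().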
4160
4161 #ifdef ASSERT
4162 address reentry = NULL;
4163 { NearLabel ok;
4164 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
4165 reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");
4166 __ bind(ok);
4167 }
4168 { NearLabel ok;
4169 __ compareU64_and_branch(Rcurr_monitor, Z_esp, Assembler::bcondHigh, ok);
4170 reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors above Z_esp");
4171 __ bind(ok);
4172 }
4173 #endif
4174
4175 // Check if bottom reached, i.e. if there is at least one monitor.
4176 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondEqual, exit);
4177
4178 __ bind(loop);
4179 // Check if current entry is used.
4180 __ load_and_test_long(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
4181 __ z_brne(not_free);
4182 // If not used then remember entry in Rfree_slot.
4183 __ z_lgr(Rfree_slot, Rcurr_monitor);
4184 __ bind(not_free);
4185 // Exit if current entry is for same object; this guarantees that the new monitor
4186 // used for a recursive lock is above the older one.
4187 __ compareU64_and_branch(Rlocked_obj, Z_tos, Assembler::bcondEqual, exit);
4188 // otherwise advance to next entry
4189 __ add2reg(Rcurr_monitor, entry_size);
4190 // Check if bottom reached, if not at bottom then check this entry.
4191 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotEqual, loop);
4192 __ bind(exit);
4193 }
4194
4195 // Rfree_slot != NULL -> found one
4196 __ compareU64_and_branch(Rfree_slot, (intptr_t)0L, Assembler::bcondNotEqual, allocated);
4197
4198 // Allocate one if there's no free slot.
4199 __ add_monitor_to_stack(false, Z_ARG3, Z_ARG4, Z_ARG5);
4200 __ get_monitors(Rfree_slot);
4201
4202 // Rfree_slot: points to monitor entry.
4203 __ bind(allocated);
4204
4205 // Increment bcp to point to the next bytecode, so exception
4206 // handling for async. exceptions works correctly.
4207 // The object has already been popped from the stack, so the
4208 // expression stack looks correct.
4209 __ add2reg(Z_bcp, 1, Z_bcp);
4210
4211 // Store object.
4212 __ z_stg(Z_tos, BasicObjectLock::obj_offset_in_bytes(), Rfree_slot);
4213 __ lock_object(Rfree_slot, Z_tos);
4214
4215 // Check to make sure this monitor doesn't cause stack overflow after locking.
4216 __ save_bcp(); // in case of exception
4217 __ generate_stack_overflow_check(0);
4218
4219 // The bcp has already been incremented. Just need to dispatch to
4220 // next instruction.
4221 __ dispatch_next(vtos);
4222
4223 BLOCK_COMMENT("} monitorenter");
4224 }
4225
4226
4227 void TemplateTable::monitorexit() {
4228 transition(atos, vtos);
4229
4230 BLOCK_COMMENT("monitorexit {");
4231
4232 // Check for NULL object.
4233 __ null_check(Z_tos);
4234
4235 NearLabel found, not_found;
4236 const Register Rcurr_monitor = Z_ARG2;
4237
4238 // Find matching slot.
4239 {
4240 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4241 NearLabel entry, loop;
4242
4243 const Register Rbot = Z_ARG3; // Points to word under bottom of monitor block.
4244 const Register Rlocked_obj = Z_ARG4;
4245 // Starting with top-most entry.
4246 __ get_monitors(Rcurr_monitor); // Rcur_monitor = IJAVA_STATE.monitors
4247 __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);
4248
4249 #ifdef ASSERT
4250 address reentry = NULL;
4251 { NearLabel ok;
4252 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
4253 reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");
4254 __ bind(ok);
4255 }
4256 { NearLabel ok;
4257 __ compareU64_and_branch(Rcurr_monitor, Z_esp, Assembler::bcondHigh, ok);
4258 reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors above Z_esp");
4259 __ bind(ok);
4260 }
4261 #endif
4262
4263 // Check if bottom reached, i.e. if there is at least one monitor.
4264 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondEqual, not_found);
4265
4266 __ bind(loop);
4267 // Check if current entry is for same object.
4268 __ z_lg(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
4269 // If same object then stop searching.
4270 __ compareU64_and_branch(Rlocked_obj, Z_tos, Assembler::bcondEqual, found);
4271 // Otherwise advance to next entry.
4272 __ add2reg(Rcurr_monitor, entry_size);
4273 // Check if bottom reached, if not at bottom then check this entry.
4274 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotEqual, loop);
4275 }
4276
4277 __ bind(not_found);
4278 // Error handling. Unlocking was not block-structured.
4279 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4280 InterpreterRuntime::throw_illegal_monitor_state_exception));
4281 __ should_not_reach_here();
4282
4283 __ bind(found);
4284 __ push_ptr(Z_tos); // Make sure object is on stack (contract with oopMaps).
4285 __ unlock_object(Rcurr_monitor, Z_tos);
4286 __ pop_ptr(Z_tos); // Discard object.
4287 BLOCK_COMMENT("} monitorexit");
4288 }
4289
4290 // Wide instructions
4291 void TemplateTable::wide() {
4292 transition(vtos, vtos);
4293
4294 __ z_llgc(Z_R1_scratch, at_bcp(1));
4295 __ z_sllg(Z_R1_scratch, Z_R1_scratch, LogBytesPerWord);
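// Each entry in Interpreter::_wentry_point is one code address, so the wide
// bytecode number is scaled by wordSize (shift by LogBytesPerWord) to form the
// byte offset into the dispatch table read below.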
4296 __ load_absolute_address(Z_tmp_1, (address) Interpreter::_wentry_point);
4297 __ mem2reg_opt(Z_tmp_1, Address(Z_tmp_1, Z_R1_scratch));
4298 __ z_br(Z_tmp_1);
4299 // Note: the bcp increment step is part of the individual wide
4300 // bytecode implementations.
4301 }
4302
4303 // Multi arrays
4304 void TemplateTable::multianewarray() {
4305 transition(vtos, atos);
4306
4307 __ z_llgc(Z_tmp_1, at_bcp(3)); // Get number of dimensions.
4308 // Slot count to byte offset.
4309 __ z_sllg(Z_tmp_1, Z_tmp_1, Interpreter::logStackElementSize);
4310 // Z_esp points past last_dim, so set Z_ARG2 to first_dim address.
4311 __ load_address(Z_ARG2, Address(Z_esp, Z_tmp_1));
4312 call_VM(Z_RET,
4313 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
4314 Z_ARG2);
4315 // Pop dimensions from expression stack.
4316 __ z_agr(Z_esp, Z_tmp_1);
4317 }
4318