1 /*
2 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "interpreter/templateTable.hpp"
30 #include "memory/universe.inline.hpp"
31 #include "oops/methodData.hpp"
32 #include "oops/objArrayKlass.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "prims/methodHandles.hpp"
35 #include "runtime/sharedRuntime.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "runtime/synchronizer.hpp"
38 #include "utilities/macros.hpp"
39
40 #ifndef CC_INTERP
41
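// Shorthand used throughout this file: '__' emits code through the current
// InterpreterMacroAssembler.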
42 #define __ _masm->
43
44 // Platform-dependent initialization
45
46 void TemplateTable::pd_initialize() {
47 // No amd64 specific initialization
48 }
49
50 // Address computation: local variables
51
52 static inline Address iaddress(int n) {
53 return Address(r14, Interpreter::local_offset_in_bytes(n));
54 }
55
56 static inline Address laddress(int n) {
57 return iaddress(n + 1);
58 }
59
60 static inline Address faddress(int n) {
61 return iaddress(n);
62 }
63
64 static inline Address daddress(int n) {
65 return laddress(n);
66 }
67
68 static inline Address aaddress(int n) {
69 return iaddress(n);
70 }
71
72 static inline Address iaddress(Register r) {
73 return Address(r14, r, Address::times_8);
74 }
75
76 static inline Address laddress(Register r) {
77 return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
78 }
79
80 static inline Address faddress(Register r) {
81 return iaddress(r);
82 }
83
84 static inline Address daddress(Register r) {
85 return laddress(r);
86 }
87
88 static inline Address aaddress(Register r) {
89 return iaddress(r);
90 }
91
92 static inline Address at_rsp() {
93 return Address(rsp, 0);
94 }
95
96 // At top of Java expression stack, which may be different from rsp(). It
97 // isn't for category 1 objects.
98 static inline Address at_tos () {
99 return Address(rsp, Interpreter::expr_offset_in_bytes(0));
100 }
101
102 static inline Address at_tos_p1() {
103 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
104 }
105
106 static inline Address at_tos_p2() {
107 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
108 }
109
110 // Condition conversion
111 static Assembler::Condition j_not(TemplateTable::Condition cc) {
112 switch (cc) {
113 case TemplateTable::equal : return Assembler::notEqual;
114 case TemplateTable::not_equal : return Assembler::equal;
115 case TemplateTable::less : return Assembler::greaterEqual;
116 case TemplateTable::less_equal : return Assembler::greater;
117 case TemplateTable::greater : return Assembler::lessEqual;
118 case TemplateTable::greater_equal: return Assembler::less;
119 }
120 ShouldNotReachHere();
121 return Assembler::zero;
122 }
123
124
125 // Miscellaneous helper routines
126 // Store an oop (or NULL) at the address described by obj.
127 // If val == noreg this means store a NULL
128
129 static void do_oop_store(InterpreterMacroAssembler* _masm,
130 Address obj,
131 Register val,
132 BarrierSet::Name barrier,
133 bool precise) {
134 assert(val == noreg || val == rax, "parameter is just for looks");
135 switch (barrier) {
136 #if INCLUDE_ALL_GCS
137 case BarrierSet::G1SATBCT:
138 case BarrierSet::G1SATBCTLogging:
139 {
140 // flatten object address if needed
141 if (obj.index() == noreg && obj.disp() == 0) {
142 if (obj.base() != rdx) {
143 __ movq(rdx, obj.base());
144 }
145 } else {
146 __ leaq(rdx, obj);
147 }
148 __ g1_write_barrier_pre(rdx /* obj */,
149 rbx /* pre_val */,
150 r15_thread /* thread */,
151 r8 /* tmp */,
152 val != noreg /* tosca_live */,
153 false /* expand_call */);
154 if (val == noreg) {
155 __ store_heap_oop_null(Address(rdx, 0));
156 } else {
157 // G1 barrier needs uncompressed oop for region cross check.
158 Register new_val = val;
159 if (UseCompressedOops) {
160 new_val = rbx;
161 __ movptr(new_val, val);
162 }
163 __ store_heap_oop(Address(rdx, 0), val);
164 __ g1_write_barrier_post(rdx /* store_adr */,
165 new_val /* new_val */,
166 r15_thread /* thread */,
167 r8 /* tmp */,
168 rbx /* tmp2 */);
169 }
170 }
171 break;
172 #endif // INCLUDE_ALL_GCS
173 case BarrierSet::CardTableModRef:
174 case BarrierSet::CardTableExtension:
175 {
176 if (val == noreg) {
177 __ store_heap_oop_null(obj);
178 } else {
179 __ store_heap_oop(obj, val);
180 // flatten object address if needed
181 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
182 __ store_check(obj.base());
183 } else {
184 __ leaq(rdx, obj);
185 __ store_check(rdx);
186 }
187 }
188 }
189 break;
190 case BarrierSet::ModRef:
191 case BarrierSet::Other:
192 if (val == noreg) {
193 __ store_heap_oop_null(obj);
194 } else {
195 __ store_heap_oop(obj, val);
196 }
197 break;
198 default :
199 ShouldNotReachHere();
200
201 }
202 }
203
204 Address TemplateTable::at_bcp(int offset) {
205 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
206 return Address(r13, offset);
207 }
208
209 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
210 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
211 int byte_no) {
212 if (!RewriteBytecodes) return;
213 Label L_patch_done;
214
215 switch (bc) {
216 case Bytecodes::_fast_aputfield:
217 case Bytecodes::_fast_bputfield:
218 case Bytecodes::_fast_zputfield:
219 case Bytecodes::_fast_cputfield:
220 case Bytecodes::_fast_dputfield:
221 case Bytecodes::_fast_fputfield:
222 case Bytecodes::_fast_iputfield:
223 case Bytecodes::_fast_lputfield:
224 case Bytecodes::_fast_sputfield:
225 {
226 // We skip bytecode quickening for putfield instructions when
227 // the put_code written to the constant pool cache is zero.
228 // This is required so that every execution of this instruction
229 // calls out to InterpreterRuntime::resolve_get_put to do
230 // additional, required work.
231 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
232 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
233 __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
234 __ movl(bc_reg, bc);
235 __ cmpl(temp_reg, (int) 0);
236 __ jcc(Assembler::zero, L_patch_done); // don't patch
237 }
238 break;
239 default:
240 assert(byte_no == -1, "sanity");
241 // the pair bytecodes have already done the load.
242 if (load_bc_into_bc_reg) {
243 __ movl(bc_reg, bc);
244 }
245 }
246
247 if (JvmtiExport::can_post_breakpoint()) {
248 Label L_fast_patch;
249 // if a breakpoint is present we can't rewrite the stream directly
250 __ movzbl(temp_reg, at_bcp(0));
251 __ cmpl(temp_reg, Bytecodes::_breakpoint);
252 __ jcc(Assembler::notEqual, L_fast_patch);
253 __ get_method(temp_reg);
254 // Let breakpoint table handling rewrite to quicker bytecode
255 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
256 #ifndef ASSERT
257 __ jmpb(L_patch_done);
258 #else
259 __ jmp(L_patch_done);
260 #endif
261 __ bind(L_fast_patch);
262 }
263
264 #ifdef ASSERT
265 Label L_okay;
266 __ load_unsigned_byte(temp_reg, at_bcp(0));
267 __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
268 __ jcc(Assembler::equal, L_okay);
269 __ cmpl(temp_reg, bc_reg);
270 __ jcc(Assembler::equal, L_okay);
271 __ stop("patching the wrong bytecode");
272 __ bind(L_okay);
273 #endif
274
275 // patch bytecode
276 __ movb(at_bcp(0), bc_reg);
277 __ bind(L_patch_done);
278 }
279
280
281 // Individual instructions
282
283 void TemplateTable::nop() {
284 transition(vtos, vtos);
285 // nothing to do
286 }
287
288 void TemplateTable::shouldnotreachhere() {
289 transition(vtos, vtos);
290 __ stop("shouldnotreachhere bytecode");
291 }
292
293 void TemplateTable::aconst_null() {
294 transition(vtos, atos);
295 __ xorl(rax, rax);
296 }
297
298 void TemplateTable::iconst(int value) {
299 transition(vtos, itos);
300 if (value == 0) {
301 __ xorl(rax, rax);
302 } else {
303 __ movl(rax, value);
304 }
305 }
306
307 void TemplateTable::lconst(int value) {
308 transition(vtos, ltos);
309 if (value == 0) {
310 __ xorl(rax, rax);
311 } else {
312 __ movl(rax, value);
313 }
314 }
315
316 void TemplateTable::fconst(int value) {
317 transition(vtos, ftos);
318 static float one = 1.0f, two = 2.0f;
319 switch (value) {
320 case 0:
321 __ xorps(xmm0, xmm0);
322 break;
323 case 1:
324 __ movflt(xmm0, ExternalAddress((address) &one));
325 break;
326 case 2:
327 __ movflt(xmm0, ExternalAddress((address) &two));
328 break;
329 default:
330 ShouldNotReachHere();
331 break;
332 }
333 }
334
335 void TemplateTable::dconst(int value) {
336 transition(vtos, dtos);
337 static double one = 1.0;
338 switch (value) {
339 case 0:
340 __ xorpd(xmm0, xmm0);
341 break;
342 case 1:
343 __ movdbl(xmm0, ExternalAddress((address) &one));
344 break;
345 default:
346 ShouldNotReachHere();
347 break;
348 }
349 }
350
351 void TemplateTable::bipush() {
352 transition(vtos, itos);
353 __ load_signed_byte(rax, at_bcp(1));
354 }
355
356 void TemplateTable::sipush() {
357 transition(vtos, itos);
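// The 16-bit operand is stored big-endian in the bytecode stream. After the
// little-endian 16-bit load, bswap moves the operand into the upper half of
// the register, and the arithmetic shift right by 16 restores both the byte
// order and the sign.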
358 __ load_unsigned_short(rax, at_bcp(1));
359 __ bswapl(rax);
360 __ sarl(rax, 16);
361 }
362
363 void TemplateTable::ldc(bool wide) {
364 transition(vtos, vtos);
365 Label call_ldc, notFloat, notClass, Done;
366
367 if (wide) {
368 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
369 } else {
370 __ load_unsigned_byte(rbx, at_bcp(1));
371 }
372
373 __ get_cpool_and_tags(rcx, rax);
374 const int base_offset = ConstantPool::header_size() * wordSize;
375 const int tags_offset = Array<u1>::base_offset_in_bytes();
376
377 // get type
378 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
379
380 // unresolved class - get the resolved class
381 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
382 __ jccb(Assembler::equal, call_ldc);
383
384 // unresolved class in error state - call into runtime to throw the error
385 // from the first resolution attempt
386 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
387 __ jccb(Assembler::equal, call_ldc);
388
389 // resolved class - need to call vm to get java mirror of the class
390 __ cmpl(rdx, JVM_CONSTANT_Class);
391 __ jcc(Assembler::notEqual, notClass);
392
393 __ bind(call_ldc);
394 __ movl(c_rarg1, wide);
395 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
396 __ push_ptr(rax);
397 __ verify_oop(rax);
398 __ jmp(Done);
399
400 __ bind(notClass);
401 __ cmpl(rdx, JVM_CONSTANT_Float);
402 __ jccb(Assembler::notEqual, notFloat);
403 // ftos
404 __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
405 __ push_f();
406 __ jmp(Done);
407
408 __ bind(notFloat);
409 #ifdef ASSERT
410 {
411 Label L;
412 __ cmpl(rdx, JVM_CONSTANT_Integer);
413 __ jcc(Assembler::equal, L);
414 // String and Object are rewritten to fast_aldc
415 __ stop("unexpected tag type in ldc");
416 __ bind(L);
417 }
418 #endif
419 // itos JVM_CONSTANT_Integer only
420 __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
421 __ push_i(rax);
422 __ bind(Done);
423 }
424
425 // Fast path for caching oop constants.
426 void TemplateTable::fast_aldc(bool wide) {
427 transition(vtos, atos);
428
429 Register result = rax;
430 Register tmp = rdx;
431 int index_size = wide ? sizeof(u2) : sizeof(u1);
432
433 Label resolved;
434
435 // We are resolved if the resolved reference cache entry contains a
436 // non-null object (String, MethodType, etc.)
437 assert_different_registers(result, tmp);
438 __ get_cache_index_at_bcp(tmp, 1, index_size);
439 __ load_resolved_reference_at_index(result, tmp);
440 __ testl(result, result);
441 __ jcc(Assembler::notZero, resolved);
442
443 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
444
445 // first time invocation - must resolve first
446 __ movl(tmp, (int)bytecode());
447 __ call_VM(result, entry, tmp);
448
449 __ bind(resolved);
450
451 if (VerifyOops) {
452 __ verify_oop(result);
453 }
454 }
455
456 void TemplateTable::ldc2_w() {
457 transition(vtos, vtos);
458 Label Long, Done;
459 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
460
461 __ get_cpool_and_tags(rcx, rax);
462 const int base_offset = ConstantPool::header_size() * wordSize;
463 const int tags_offset = Array<u1>::base_offset_in_bytes();
464
465 // get type
466 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
467 JVM_CONSTANT_Double);
468 __ jccb(Assembler::notEqual, Long);
469 // dtos
470 __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
471 __ push_d();
472 __ jmpb(Done);
473
474 __ bind(Long);
475 // ltos
476 __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
477 __ push_l();
478
479 __ bind(Done);
480 }
481
482 void TemplateTable::locals_index(Register reg, int offset) {
483 __ load_unsigned_byte(reg, at_bcp(offset));
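// Locals are laid out toward lower addresses from r14 (slot n sits at
// r14 - n*wordSize), so the index is negated here and later scaled by 8
// in iaddress(Register).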
484 __ negptr(reg);
485 }
486
487 void TemplateTable::iload() {
488 transition(vtos, itos);
489 if (RewriteFrequentPairs) {
490 Label rewrite, done;
491 const Register bc = c_rarg3;
492 assert(rbx != bc, "register damaged");
493
494 // get next byte
495 __ load_unsigned_byte(rbx,
496 at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
497 // if _iload, wait to rewrite to _fast_iload2. We only want to rewrite the
498 // last two iloads in a pair. Comparing against fast_iload means that
499 // the next bytecode is neither an iload nor a caload, and therefore
500 // an iload pair.
501 __ cmpl(rbx, Bytecodes::_iload);
502 __ jcc(Assembler::equal, done);
503
504 __ cmpl(rbx, Bytecodes::_fast_iload);
505 __ movl(bc, Bytecodes::_fast_iload2);
506 __ jccb(Assembler::equal, rewrite);
507
508 // if _caload, rewrite to fast_icaload
509 __ cmpl(rbx, Bytecodes::_caload);
510 __ movl(bc, Bytecodes::_fast_icaload);
511 __ jccb(Assembler::equal, rewrite);
512
513 // rewrite so iload doesn't check again.
514 __ movl(bc, Bytecodes::_fast_iload);
515
516 // rewrite
517 // bc: fast bytecode
518 __ bind(rewrite);
519 patch_bytecode(Bytecodes::_iload, bc, rbx, false);
520 __ bind(done);
521 }
522
523 // Get the local value into tos
524 locals_index(rbx);
525 __ movl(rax, iaddress(rbx));
526 }
527
528 void TemplateTable::fast_iload2() {
529 transition(vtos, itos);
530 locals_index(rbx);
531 __ movl(rax, iaddress(rbx));
532 __ push(itos);
533 locals_index(rbx, 3);
534 __ movl(rax, iaddress(rbx));
535 }
536
537 void TemplateTable::fast_iload() {
538 transition(vtos, itos);
539 locals_index(rbx);
540 __ movl(rax, iaddress(rbx));
541 }
542
543 void TemplateTable::lload() {
544 transition(vtos, ltos);
545 locals_index(rbx);
546 __ movq(rax, laddress(rbx));
547 }
548
549 void TemplateTable::fload() {
550 transition(vtos, ftos);
551 locals_index(rbx);
552 __ movflt(xmm0, faddress(rbx));
553 }
554
555 void TemplateTable::dload() {
556 transition(vtos, dtos);
557 locals_index(rbx);
558 __ movdbl(xmm0, daddress(rbx));
559 }
560
561 void TemplateTable::aload() {
562 transition(vtos, atos);
563 locals_index(rbx);
564 __ movptr(rax, aaddress(rbx));
565 }
566
567 void TemplateTable::locals_index_wide(Register reg) {
568 __ load_unsigned_short(reg, at_bcp(2));
569 __ bswapl(reg);
570 __ shrl(reg, 16);
571 __ negptr(reg);
572 }
573
574 void TemplateTable::wide_iload() {
575 transition(vtos, itos);
576 locals_index_wide(rbx);
577 __ movl(rax, iaddress(rbx));
578 }
579
580 void TemplateTable::wide_lload() {
581 transition(vtos, ltos);
582 locals_index_wide(rbx);
583 __ movq(rax, laddress(rbx));
584 }
585
586 void TemplateTable::wide_fload() {
587 transition(vtos, ftos);
588 locals_index_wide(rbx);
589 __ movflt(xmm0, faddress(rbx));
590 }
591
592 void TemplateTable::wide_dload() {
593 transition(vtos, dtos);
594 locals_index_wide(rbx);
595 __ movdbl(xmm0, daddress(rbx));
596 }
597
598 void TemplateTable::wide_aload() {
599 transition(vtos, atos);
600 locals_index_wide(rbx);
601 __ movptr(rax, aaddress(rbx));
602 }
603
604 void TemplateTable::index_check(Register array, Register index) {
605 // destroys rbx
606 // check array
607 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
608 // sign extend index for use by indexed load
609 __ movl2ptr(index, index);
610 // check index
611 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
612 if (index != rbx) {
613 // ??? convention: move aberrant index into ebx for exception message
614 assert(rbx != array, "different registers");
615 __ movl(rbx, index);
616 }
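// Unsigned comparison: a negative index appears as a huge unsigned value, so
// it also takes the out-of-bounds exception path below.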
617 __ jump_cc(Assembler::aboveEqual,
618 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
619 }
620
621 void TemplateTable::iaload() {
622 transition(itos, itos);
623 __ pop_ptr(rdx);
624 // eax: index
625 // rdx: array
626 index_check(rdx, rax); // kills rbx
627 __ movl(rax, Address(rdx, rax,
628 Address::times_4,
629 arrayOopDesc::base_offset_in_bytes(T_INT)));
630 }
631
632 void TemplateTable::laload() {
633 transition(itos, ltos);
634 __ pop_ptr(rdx);
635 // eax: index
636 // rdx: array
637 index_check(rdx, rax); // kills rbx
638 __ movq(rax, Address(rdx, rbx,
639 Address::times_8,
640 arrayOopDesc::base_offset_in_bytes(T_LONG)));
641 }
642
643 void TemplateTable::faload() {
644 transition(itos, ftos);
645 __ pop_ptr(rdx);
646 // eax: index
647 // rdx: array
648 index_check(rdx, rax); // kills rbx
649 __ movflt(xmm0, Address(rdx, rax,
650 Address::times_4,
651 arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
652 }
653
654 void TemplateTable::daload() {
655 transition(itos, dtos);
656 __ pop_ptr(rdx);
657 // eax: index
658 // rdx: array
659 index_check(rdx, rax); // kills rbx
660 __ movdbl(xmm0, Address(rdx, rax,
661 Address::times_8,
662 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
663 }
664
665 void TemplateTable::aaload() {
666 transition(itos, atos);
667 __ pop_ptr(rdx);
668 // eax: index
669 // rdx: array
670 index_check(rdx, rax); // kills rbx
671 __ load_heap_oop(rax, Address(rdx, rax,
672 UseCompressedOops ? Address::times_4 : Address::times_8,
673 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
674 }
675
676 void TemplateTable::baload() {
677 transition(itos, itos);
678 __ pop_ptr(rdx);
679 // eax: index
680 // rdx: array
681 index_check(rdx, rax); // kills rbx
682 __ load_signed_byte(rax,
683 Address(rdx, rax,
684 Address::times_1,
685 arrayOopDesc::base_offset_in_bytes(T_BYTE)));
686 }
687
688 void TemplateTable::caload() {
689 transition(itos, itos);
690 __ pop_ptr(rdx);
691 // eax: index
692 // rdx: array
693 index_check(rdx, rax); // kills rbx
694 __ load_unsigned_short(rax,
695 Address(rdx, rax,
696 Address::times_2,
697 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
698 }
699
700 // iload followed by caload frequent pair
701 void TemplateTable::fast_icaload() {
702 transition(vtos, itos);
703 // load index out of locals
704 locals_index(rbx);
705 __ movl(rax, iaddress(rbx));
706
707 // eax: index
708 // rdx: array
709 __ pop_ptr(rdx);
710 index_check(rdx, rax); // kills rbx
711 __ load_unsigned_short(rax,
712 Address(rdx, rax,
713 Address::times_2,
714 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
715 }
716
717 void TemplateTable::saload() {
718 transition(itos, itos);
719 __ pop_ptr(rdx);
720 // eax: index
721 // rdx: array
722 index_check(rdx, rax); // kills rbx
723 __ load_signed_short(rax,
724 Address(rdx, rax,
725 Address::times_2,
726 arrayOopDesc::base_offset_in_bytes(T_SHORT)));
727 }
728
729 void TemplateTable::iload(int n) {
730 transition(vtos, itos);
731 __ movl(rax, iaddress(n));
732 }
733
734 void TemplateTable::lload(int n) {
735 transition(vtos, ltos);
736 __ movq(rax, laddress(n));
737 }
738
739 void TemplateTable::fload(int n) {
740 transition(vtos, ftos);
741 __ movflt(xmm0, faddress(n));
742 }
743
744 void TemplateTable::dload(int n) {
745 transition(vtos, dtos);
746 __ movdbl(xmm0, daddress(n));
747 }
748
749 void TemplateTable::aload(int n) {
750 transition(vtos, atos);
751 __ movptr(rax, aaddress(n));
752 }
753
754 void TemplateTable::aload_0() {
755 transition(vtos, atos);
756 // According to bytecode histograms, the pairs:
757 //
758 // _aload_0, _fast_igetfield
759 // _aload_0, _fast_agetfield
760 // _aload_0, _fast_fgetfield
761 //
762 // occur frequently. If RewriteFrequentPairs is set, the (slow)
763 // _aload_0 bytecode checks if the next bytecode is either
764 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
765 // rewrites the current bytecode into a pair bytecode; otherwise it
766 // rewrites the current bytecode into _fast_aload_0 that doesn't do
767 // the pair check anymore.
768 //
769 // Note: If the next bytecode is _getfield, the rewrite must be
770 // delayed, otherwise we may miss an opportunity for a pair.
771 //
772 // Also rewrite frequent pairs
773 // aload_0, aload_1
774 // aload_0, iload_1
775 // These bytecodes, which require only a small amount of code, are the most
776 // profitable to rewrite.
777 if (RewriteFrequentPairs) {
778 Label rewrite, done;
779 const Register bc = c_rarg3;
780 assert(rbx != bc, "register damaged");
781 // get next byte
782 __ load_unsigned_byte(rbx,
783 at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
784
785 // do actual aload_0
786 aload(0);
787
788 // if _getfield then wait with rewrite
789 __ cmpl(rbx, Bytecodes::_getfield);
790 __ jcc(Assembler::equal, done);
791
792 // if _igetfield then rewrite to _fast_iaccess_0
793 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
794 Bytecodes::_aload_0,
795 "fix bytecode definition");
796 __ cmpl(rbx, Bytecodes::_fast_igetfield);
797 __ movl(bc, Bytecodes::_fast_iaccess_0);
798 __ jccb(Assembler::equal, rewrite);
799
800 // if _agetfield then rewrite to _fast_aaccess_0
801 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
802 Bytecodes::_aload_0,
803 "fix bytecode definition");
804 __ cmpl(rbx, Bytecodes::_fast_agetfield);
805 __ movl(bc, Bytecodes::_fast_aaccess_0);
806 __ jccb(Assembler::equal, rewrite);
807
808 // if _fgetfield then rewrite to _fast_faccess_0
809 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
810 Bytecodes::_aload_0,
811 "fix bytecode definition");
812 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
813 __ movl(bc, Bytecodes::_fast_faccess_0);
814 __ jccb(Assembler::equal, rewrite);
815
816 // else rewrite to _fast_aload_0
817 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
818 Bytecodes::_aload_0,
819 "fix bytecode definition");
820 __ movl(bc, Bytecodes::_fast_aload_0);
821
822 // rewrite
823 // bc: fast bytecode
824 __ bind(rewrite);
825 patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
826
827 __ bind(done);
828 } else {
829 aload(0);
830 }
831 }
832
833 void TemplateTable::istore() {
834 transition(itos, vtos);
835 locals_index(rbx);
836 __ movl(iaddress(rbx), rax);
837 }
838
839 void TemplateTable::lstore() {
840 transition(ltos, vtos);
841 locals_index(rbx);
842 __ movq(laddress(rbx), rax);
843 }
844
845 void TemplateTable::fstore() {
846 transition(ftos, vtos);
847 locals_index(rbx);
848 __ movflt(faddress(rbx), xmm0);
849 }
850
851 void TemplateTable::dstore() {
852 transition(dtos, vtos);
853 locals_index(rbx);
854 __ movdbl(daddress(rbx), xmm0);
855 }
856
857 void TemplateTable::astore() {
858 transition(vtos, vtos);
859 __ pop_ptr(rax);
860 locals_index(rbx);
861 __ movptr(aaddress(rbx), rax);
862 }
863
864 void TemplateTable::wide_istore() {
865 transition(vtos, vtos);
866 __ pop_i();
867 locals_index_wide(rbx);
868 __ movl(iaddress(rbx), rax);
869 }
870
871 void TemplateTable::wide_lstore() {
872 transition(vtos, vtos);
873 __ pop_l();
874 locals_index_wide(rbx);
875 __ movq(laddress(rbx), rax);
876 }
877
878 void TemplateTable::wide_fstore() {
879 transition(vtos, vtos);
880 __ pop_f();
881 locals_index_wide(rbx);
882 __ movflt(faddress(rbx), xmm0);
883 }
884
885 void TemplateTable::wide_dstore() {
886 transition(vtos, vtos);
887 __ pop_d();
888 locals_index_wide(rbx);
889 __ movdbl(daddress(rbx), xmm0);
890 }
891
892 void TemplateTable::wide_astore() {
893 transition(vtos, vtos);
894 __ pop_ptr(rax);
895 locals_index_wide(rbx);
896 __ movptr(aaddress(rbx), rax);
897 }
898
899 void TemplateTable::iastore() {
900 transition(itos, vtos);
901 __ pop_i(rbx);
902 __ pop_ptr(rdx);
903 // eax: value
904 // ebx: index
905 // rdx: array
906 index_check(rdx, rbx); // prefer index in ebx
907 __ movl(Address(rdx, rbx,
908 Address::times_4,
909 arrayOopDesc::base_offset_in_bytes(T_INT)),
910 rax);
911 }
912
913 void TemplateTable::lastore() {
914 transition(ltos, vtos);
915 __ pop_i(rbx);
916 __ pop_ptr(rdx);
917 // rax: value
918 // ebx: index
919 // rdx: array
920 index_check(rdx, rbx); // prefer index in ebx
921 __ movq(Address(rdx, rbx,
922 Address::times_8,
923 arrayOopDesc::base_offset_in_bytes(T_LONG)),
924 rax);
925 }
926
927 void TemplateTable::fastore() {
928 transition(ftos, vtos);
929 __ pop_i(rbx);
930 __ pop_ptr(rdx);
931 // xmm0: value
932 // ebx: index
933 // rdx: array
934 index_check(rdx, rbx); // prefer index in ebx
935 __ movflt(Address(rdx, rbx,
936 Address::times_4,
937 arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
938 xmm0);
939 }
940
941 void TemplateTable::dastore() {
942 transition(dtos, vtos);
943 __ pop_i(rbx);
944 __ pop_ptr(rdx);
945 // xmm0: value
946 // ebx: index
947 // rdx: array
948 index_check(rdx, rbx); // prefer index in ebx
949 __ movdbl(Address(rdx, rbx,
950 Address::times_8,
951 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
952 xmm0);
953 }
954
955 void TemplateTable::aastore() {
956 Label is_null, ok_is_subtype, done;
957 transition(vtos, vtos);
958 // stack: ..., array, index, value
959 __ movptr(rax, at_tos()); // value
960 __ movl(rcx, at_tos_p1()); // index
961 __ movptr(rdx, at_tos_p2()); // array
962
963 Address element_address(rdx, rcx,
964 UseCompressedOops? Address::times_4 : Address::times_8,
965 arrayOopDesc::base_offset_in_bytes(T_OBJECT));
966
967 index_check(rdx, rcx); // kills rbx
968 // do array store check - check for NULL value first
969 __ testptr(rax, rax);
970 __ jcc(Assembler::zero, is_null);
971
972 // Move subklass into rbx
973 __ load_klass(rbx, rax);
974 // Move superklass into rax
975 __ load_klass(rax, rdx);
976 __ movptr(rax, Address(rax,
977 ObjArrayKlass::element_klass_offset()));
978 // Compress array + index*oopSize + 12 into a single register. Frees rcx.
979 __ lea(rdx, element_address);
980
981 // Generate subtype check. Blows rcx, rdi
982 // Superklass in rax. Subklass in rbx.
983 __ gen_subtype_check(rbx, ok_is_subtype);
984
985 // Come here on failure
986 // object is at TOS
987 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
988
989 // Come here on success
990 __ bind(ok_is_subtype);
991
992 // Get the value we will store
993 __ movptr(rax, at_tos());
994 // Now store using the appropriate barrier
995 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
996 __ jmp(done);
997
998 // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
999 __ bind(is_null);
1000 __ profile_null_seen(rbx);
1001
1002 // Store a NULL
1003 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
1004
1005 // Pop stack arguments
1006 __ bind(done);
1007 __ addptr(rsp, 3 * Interpreter::stackElementSize);
1008 }
1009
1010 void TemplateTable::bastore() {
1011 transition(itos, vtos);
1012 __ pop_i(rbx);
1013 __ pop_ptr(rdx);
1014 // eax: value
1015 // ebx: index
1016 // rdx: array
1017 index_check(rdx, rbx); // prefer index in ebx
1018 // Need to check whether array is boolean or byte
1019 // since both types share the bastore bytecode.
1020 __ load_klass(rcx, rdx);
1021 __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
1022 int diffbit = Klass::layout_helper_boolean_diffbit();
1023 __ testl(rcx, diffbit);
1024 Label L_skip;
1025 __ jccb(Assembler::zero, L_skip);
1026 __ andl(rax, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1027 __ bind(L_skip);
1028 __ movb(Address(rdx, rbx,
1029 Address::times_1,
1030 arrayOopDesc::base_offset_in_bytes(T_BYTE)),
1031 rax);
1032 }
1033
1034 void TemplateTable::castore() {
1035 transition(itos, vtos);
1036 __ pop_i(rbx);
1037 __ pop_ptr(rdx);
1038 // eax: value
1039 // ebx: index
1040 // rdx: array
1041 index_check(rdx, rbx); // prefer index in ebx
1042 __ movw(Address(rdx, rbx,
1043 Address::times_2,
1044 arrayOopDesc::base_offset_in_bytes(T_CHAR)),
1045 rax);
1046 }
1047
1048 void TemplateTable::sastore() {
1049 castore();
1050 }
1051
1052 void TemplateTable::istore(int n) {
1053 transition(itos, vtos);
1054 __ movl(iaddress(n), rax);
1055 }
1056
1057 void TemplateTable::lstore(int n) {
1058 transition(ltos, vtos);
1059 __ movq(laddress(n), rax);
1060 }
1061
1062 void TemplateTable::fstore(int n) {
1063 transition(ftos, vtos);
1064 __ movflt(faddress(n), xmm0);
1065 }
1066
1067 void TemplateTable::dstore(int n) {
1068 transition(dtos, vtos);
1069 __ movdbl(daddress(n), xmm0);
1070 }
1071
1072 void TemplateTable::astore(int n) {
1073 transition(vtos, vtos);
1074 __ pop_ptr(rax);
1075 __ movptr(aaddress(n), rax);
1076 }
1077
1078 void TemplateTable::pop() {
1079 transition(vtos, vtos);
1080 __ addptr(rsp, Interpreter::stackElementSize);
1081 }
1082
1083 void TemplateTable::pop2() {
1084 transition(vtos, vtos);
1085 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1086 }
1087
1088 void TemplateTable::dup() {
1089 transition(vtos, vtos);
1090 __ load_ptr(0, rax);
1091 __ push_ptr(rax);
1092 // stack: ..., a, a
1093 }
1094
1095 void TemplateTable::dup_x1() {
1096 transition(vtos, vtos);
1097 // stack: ..., a, b
1098 __ load_ptr( 0, rax); // load b
1099 __ load_ptr( 1, rcx); // load a
1100 __ store_ptr(1, rax); // store b
1101 __ store_ptr(0, rcx); // store a
1102 __ push_ptr(rax); // push b
1103 // stack: ..., b, a, b
1104 }
1105
1106 void TemplateTable::dup_x2() {
1107 transition(vtos, vtos);
1108 // stack: ..., a, b, c
1109 __ load_ptr( 0, rax); // load c
1110 __ load_ptr( 2, rcx); // load a
1111 __ store_ptr(2, rax); // store c in a
1112 __ push_ptr(rax); // push c
1113 // stack: ..., c, b, c, c
1114 __ load_ptr( 2, rax); // load b
1115 __ store_ptr(2, rcx); // store a in b
1116 // stack: ..., c, a, c, c
1117 __ store_ptr(1, rax); // store b in c
1118 // stack: ..., c, a, b, c
1119 }
1120
1121 void TemplateTable::dup2() {
1122 transition(vtos, vtos);
1123 // stack: ..., a, b
1124 __ load_ptr(1, rax); // load a
1125 __ push_ptr(rax); // push a
1126 __ load_ptr(1, rax); // load b
1127 __ push_ptr(rax); // push b
1128 // stack: ..., a, b, a, b
1129 }
1130
1131 void TemplateTable::dup2_x1() {
1132 transition(vtos, vtos);
1133 // stack: ..., a, b, c
1134 __ load_ptr( 0, rcx); // load c
1135 __ load_ptr( 1, rax); // load b
1136 __ push_ptr(rax); // push b
1137 __ push_ptr(rcx); // push c
1138 // stack: ..., a, b, c, b, c
1139 __ store_ptr(3, rcx); // store c in b
1140 // stack: ..., a, c, c, b, c
1141 __ load_ptr( 4, rcx); // load a
1142 __ store_ptr(2, rcx); // store a in 2nd c
1143 // stack: ..., a, c, a, b, c
1144 __ store_ptr(4, rax); // store b in a
1145 // stack: ..., b, c, a, b, c
1146 }
1147
1148 void TemplateTable::dup2_x2() {
1149 transition(vtos, vtos);
1150 // stack: ..., a, b, c, d
1151 __ load_ptr( 0, rcx); // load d
1152 __ load_ptr( 1, rax); // load c
1153 __ push_ptr(rax); // push c
1154 __ push_ptr(rcx); // push d
1155 // stack: ..., a, b, c, d, c, d
1156 __ load_ptr( 4, rax); // load b
1157 __ store_ptr(2, rax); // store b in d
1158 __ store_ptr(4, rcx); // store d in b
1159 // stack: ..., a, d, c, b, c, d
1160 __ load_ptr( 5, rcx); // load a
1161 __ load_ptr( 3, rax); // load c
1162 __ store_ptr(3, rcx); // store a in c
1163 __ store_ptr(5, rax); // store c in a
1164 // stack: ..., c, d, a, b, c, d
1165 }
1166
1167 void TemplateTable::swap() {
1168 transition(vtos, vtos);
1169 // stack: ..., a, b
1170 __ load_ptr( 1, rcx); // load a
1171 __ load_ptr( 0, rax); // load b
1172 __ store_ptr(0, rcx); // store a in b
1173 __ store_ptr(1, rax); // store b in a
1174 // stack: ..., b, a
1175 }
1176
1177 void TemplateTable::iop2(Operation op) {
1178 transition(itos, itos);
1179 switch (op) {
1180 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1181 case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1182 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1183 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1184 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1185 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1186 case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
1187 case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
1188 case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
1189 default : ShouldNotReachHere();
1190 }
1191 }
1192
1193 void TemplateTable::lop2(Operation op) {
1194 transition(ltos, ltos);
1195 switch (op) {
1196 case add : __ pop_l(rdx); __ addptr(rax, rdx); break;
1197 case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
1198 case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
1199 case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
1200 case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
1201 default : ShouldNotReachHere();
1202 }
1203 }
1204
1205 void TemplateTable::idiv() {
1206 transition(itos, itos);
1207 __ movl(rcx, rax);
1208 __ pop_i(rax);
1209 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
1210 // they are not equal, one could do a normal division (no correction
1211 // needed), which may speed up this implementation for the common case.
1212 // (see also JVM spec., p.243 & p.271)
1213 __ corrected_idivl(rcx);
1214 }
1215
1216 void TemplateTable::irem() {
1217 transition(itos, itos);
1218 __ movl(rcx, rax);
1219 __ pop_i(rax);
1220 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
1221 // they are not equal, one could do a normal division (no correction
1222 // needed), which may speed up this implementation for the common case.
1223 // (see also JVM spec., p.243 & p.271)
1224 __ corrected_idivl(rcx);
1225 __ movl(rax, rdx);
1226 }
1227
1228 void TemplateTable::lmul() {
1229 transition(ltos, ltos);
1230 __ pop_l(rdx);
1231 __ imulq(rax, rdx);
1232 }
1233
1234 void TemplateTable::ldiv() {
1235 transition(ltos, ltos);
1236 __ mov(rcx, rax);
1237 __ pop_l(rax);
1238 // generate explicit div0 check
1239 __ testq(rcx, rcx);
1240 __ jump_cc(Assembler::zero,
1241 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1242 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1243 // they are not equal, one could do a normal division (no correction
1244 // needed), which may speed up this implementation for the common case.
1245 // (see also JVM spec., p.243 & p.271)
1246 __ corrected_idivq(rcx); // kills rbx
1247 }
1248
1249 void TemplateTable::lrem() {
1250 transition(ltos, ltos);
1251 __ mov(rcx, rax);
1252 __ pop_l(rax);
1253 __ testq(rcx, rcx);
1254 __ jump_cc(Assembler::zero,
1255 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1256 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1257 // they are not equal, one could do a normal division (no correction
1258 // needed), which may speed up this implementation for the common case.
1259 // (see also JVM spec., p.243 & p.271)
1260 __ corrected_idivq(rcx); // kills rbx
1261 __ mov(rax, rdx);
1262 }
1263
1264 void TemplateTable::lshl() {
1265 transition(itos, ltos);
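// x86 variable shifts take their count in CL, so the count is moved to rcx
// before the single-operand shift below (likewise in lshr and lushr).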
1266 __ movl(rcx, rax); // get shift count
1267 __ pop_l(rax); // get shift value
1268 __ shlq(rax);
1269 }
1270
1271 void TemplateTable::lshr() {
1272 transition(itos, ltos);
1273 __ movl(rcx, rax); // get shift count
1274 __ pop_l(rax); // get shift value
1275 __ sarq(rax);
1276 }
1277
1278 void TemplateTable::lushr() {
1279 transition(itos, ltos);
1280 __ movl(rcx, rax); // get shift count
1281 __ pop_l(rax); // get shift value
1282 __ shrq(rax);
1283 }
1284
1285 void TemplateTable::fop2(Operation op) {
1286 transition(ftos, ftos);
1287 switch (op) {
1288 case add:
1289 __ addss(xmm0, at_rsp());
1290 __ addptr(rsp, Interpreter::stackElementSize);
1291 break;
1292 case sub:
1293 __ movflt(xmm1, xmm0);
1294 __ pop_f(xmm0);
1295 __ subss(xmm0, xmm1);
1296 break;
1297 case mul:
1298 __ mulss(xmm0, at_rsp());
1299 __ addptr(rsp, Interpreter::stackElementSize);
1300 break;
1301 case div:
1302 __ movflt(xmm1, xmm0);
1303 __ pop_f(xmm0);
1304 __ divss(xmm0, xmm1);
1305 break;
1306 case rem:
1307 __ movflt(xmm1, xmm0);
1308 __ pop_f(xmm0);
1309 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
1310 break;
1311 default:
1312 ShouldNotReachHere();
1313 break;
1314 }
1315 }
1316
1317 void TemplateTable::dop2(Operation op) {
1318 transition(dtos, dtos);
1319 switch (op) {
1320 case add:
1321 __ addsd(xmm0, at_rsp());
1322 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1323 break;
1324 case sub:
1325 __ movdbl(xmm1, xmm0);
1326 __ pop_d(xmm0);
1327 __ subsd(xmm0, xmm1);
1328 break;
1329 case mul:
1330 __ mulsd(xmm0, at_rsp());
1331 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1332 break;
1333 case div:
1334 __ movdbl(xmm1, xmm0);
1335 __ pop_d(xmm0);
1336 __ divsd(xmm0, xmm1);
1337 break;
1338 case rem:
1339 __ movdbl(xmm1, xmm0);
1340 __ pop_d(xmm0);
1341 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
1342 break;
1343 default:
1344 ShouldNotReachHere();
1345 break;
1346 }
1347 }
1348
1349 void TemplateTable::ineg() {
1350 transition(itos, itos);
1351 __ negl(rax);
1352 }
1353
1354 void TemplateTable::lneg() {
1355 transition(ltos, ltos);
1356 __ negq(rax);
1357 }
1358
1359 // Note: 'double' and 'long long' have 32-bit alignment on x86.
1360 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1361 // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
1362 // of 128-bit operands for SSE instructions.
1363 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1364 // Store the value to a 128-bit operand.
1365 operand[0] = lo;
1366 operand[1] = hi;
1367 return operand;
1368 }
1369
1370 // Buffers for 128-bit masks used by SSE instructions.
1371 static jlong float_signflip_pool[2*2];
1372 static jlong double_signflip_pool[2*2];
1373
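// fneg/dneg negate by XORing the IEEE sign bit(s) with a 16-byte mask; the
// pools above are oversized so double_quadword can carve out a 16-byte-aligned
// operand within them on first use.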
1374 void TemplateTable::fneg() {
1375 transition(ftos, ftos);
1376 static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
1377 __ xorps(xmm0, ExternalAddress((address) float_signflip));
1378 }
1379
1380 void TemplateTable::dneg() {
1381 transition(dtos, dtos);
1382 static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
1383 __ xorpd(xmm0, ExternalAddress((address) double_signflip));
1384 }
1385
1386 void TemplateTable::iinc() {
1387 transition(vtos, vtos);
1388 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1389 locals_index(rbx);
1390 __ addl(iaddress(rbx), rdx);
1391 }
1392
1393 void TemplateTable::wide_iinc() {
1394 transition(vtos, vtos);
1395 __ movl(rdx, at_bcp(4)); // get constant
1396 locals_index_wide(rbx);
1397 __ bswapl(rdx); // swap bytes & sign-extend constant
1398 __ sarl(rdx, 16);
1399 __ addl(iaddress(rbx), rdx);
1400 // Note: should probably use only one movl to get both
1401 // the index and the constant -> fix this
1402 }
1403
1404 void TemplateTable::convert() {
1405 // Checking
1406 #ifdef ASSERT
1407 {
1408 TosState tos_in = ilgl;
1409 TosState tos_out = ilgl;
1410 switch (bytecode()) {
1411 case Bytecodes::_i2l: // fall through
1412 case Bytecodes::_i2f: // fall through
1413 case Bytecodes::_i2d: // fall through
1414 case Bytecodes::_i2b: // fall through
1415 case Bytecodes::_i2c: // fall through
1416 case Bytecodes::_i2s: tos_in = itos; break;
1417 case Bytecodes::_l2i: // fall through
1418 case Bytecodes::_l2f: // fall through
1419 case Bytecodes::_l2d: tos_in = ltos; break;
1420 case Bytecodes::_f2i: // fall through
1421 case Bytecodes::_f2l: // fall through
1422 case Bytecodes::_f2d: tos_in = ftos; break;
1423 case Bytecodes::_d2i: // fall through
1424 case Bytecodes::_d2l: // fall through
1425 case Bytecodes::_d2f: tos_in = dtos; break;
1426 default : ShouldNotReachHere();
1427 }
1428 switch (bytecode()) {
1429 case Bytecodes::_l2i: // fall through
1430 case Bytecodes::_f2i: // fall through
1431 case Bytecodes::_d2i: // fall through
1432 case Bytecodes::_i2b: // fall through
1433 case Bytecodes::_i2c: // fall through
1434 case Bytecodes::_i2s: tos_out = itos; break;
1435 case Bytecodes::_i2l: // fall through
1436 case Bytecodes::_f2l: // fall through
1437 case Bytecodes::_d2l: tos_out = ltos; break;
1438 case Bytecodes::_i2f: // fall through
1439 case Bytecodes::_l2f: // fall through
1440 case Bytecodes::_d2f: tos_out = ftos; break;
1441 case Bytecodes::_i2d: // fall through
1442 case Bytecodes::_l2d: // fall through
1443 case Bytecodes::_f2d: tos_out = dtos; break;
1444 default : ShouldNotReachHere();
1445 }
1446 transition(tos_in, tos_out);
1447 }
1448 #endif // ASSERT
1449
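// The 64-bit truncating conversions (cvttss2siq/cvttsd2siq) return this
// "integer indefinite" value when the input is NaN or out of range; the
// f2l/d2l cases below test for it and fall back to the runtime helpers.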
1450 static const int64_t is_nan = 0x8000000000000000L;
1451
1452 // Conversion
1453 switch (bytecode()) {
1454 case Bytecodes::_i2l:
1455 __ movslq(rax, rax);
1456 break;
1457 case Bytecodes::_i2f:
1458 __ cvtsi2ssl(xmm0, rax);
1459 break;
1460 case Bytecodes::_i2d:
1461 __ cvtsi2sdl(xmm0, rax);
1462 break;
1463 case Bytecodes::_i2b:
1464 __ movsbl(rax, rax);
1465 break;
1466 case Bytecodes::_i2c:
1467 __ movzwl(rax, rax);
1468 break;
1469 case Bytecodes::_i2s:
1470 __ movswl(rax, rax);
1471 break;
1472 case Bytecodes::_l2i:
1473 __ movl(rax, rax);
1474 break;
1475 case Bytecodes::_l2f:
1476 __ cvtsi2ssq(xmm0, rax);
1477 break;
1478 case Bytecodes::_l2d:
1479 __ cvtsi2sdq(xmm0, rax);
1480 break;
1481 case Bytecodes::_f2i:
1482 {
1483 Label L;
1484 __ cvttss2sil(rax, xmm0);
1485 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1486 __ jcc(Assembler::notEqual, L);
1487 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1488 __ bind(L);
1489 }
1490 break;
1491 case Bytecodes::_f2l:
1492 {
1493 Label L;
1494 __ cvttss2siq(rax, xmm0);
1495 // NaN or overflow/underflow?
1496 __ cmp64(rax, ExternalAddress((address) &is_nan));
1497 __ jcc(Assembler::notEqual, L);
1498 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1499 __ bind(L);
1500 }
1501 break;
1502 case Bytecodes::_f2d:
1503 __ cvtss2sd(xmm0, xmm0);
1504 break;
1505 case Bytecodes::_d2i:
1506 {
1507 Label L;
1508 __ cvttsd2sil(rax, xmm0);
1509 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1510 __ jcc(Assembler::notEqual, L);
1511 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1512 __ bind(L);
1513 }
1514 break;
1515 case Bytecodes::_d2l:
1516 {
1517 Label L;
1518 __ cvttsd2siq(rax, xmm0);
1519 // NaN or overflow/underflow?
1520 __ cmp64(rax, ExternalAddress((address) &is_nan));
1521 __ jcc(Assembler::notEqual, L);
1522 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1523 __ bind(L);
1524 }
1525 break;
1526 case Bytecodes::_d2f:
1527 __ cvtsd2ss(xmm0, xmm0);
1528 break;
1529 default:
1530 ShouldNotReachHere();
1531 }
1532 }
1533
1534 void TemplateTable::lcmp() {
1535 transition(ltos, itos);
1536 Label done;
1537 __ pop_l(rdx);
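// rdx holds value1, rax holds value2. Produce -1 if value1 < value2;
// otherwise setb(notEqual) yields 0 for equal and 1 for greater.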
1538 __ cmpq(rdx, rax);
1539 __ movl(rax, -1);
1540 __ jccb(Assembler::less, done);
1541 __ setb(Assembler::notEqual, rax);
1542 __ movzbl(rax, rax);
1543 __ bind(done);
1544 }
1545
1546 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1547 Label done;
1548 if (is_float) {
1549 // XXX get rid of pop here, use ... reg, mem32
1550 __ pop_f(xmm1);
1551 __ ucomiss(xmm1, xmm0);
1552 } else {
1553 // XXX get rid of pop here, use ... reg, mem64
1554 __ pop_d(xmm1);
1555 __ ucomisd(xmm1, xmm0);
1556 }
1557 if (unordered_result < 0) {
1558 __ movl(rax, -1);
1559 __ jccb(Assembler::parity, done);
1560 __ jccb(Assembler::below, done);
1561 __ setb(Assembler::notEqual, rdx);
1562 __ movzbl(rax, rdx);
1563 } else {
1564 __ movl(rax, 1);
1565 __ jccb(Assembler::parity, done);
1566 __ jccb(Assembler::above, done);
1567 __ movl(rax, 0);
1568 __ jccb(Assembler::equal, done);
1569 __ decrementl(rax);
1570 }
1571 __ bind(done);
1572 }
1573
1574 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1575 __ get_method(rcx); // rcx holds method
1576 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
1577 // holds bumped taken count
1578
1579 const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
1580 InvocationCounter::counter_offset();
1581 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
1582 InvocationCounter::counter_offset();
1583
1584 // Load up edx with the branch displacement
1585 if (is_wide) {
1586 __ movl(rdx, at_bcp(1));
1587 } else {
1588 __ load_signed_short(rdx, at_bcp(1));
1589 }
1590 __ bswapl(rdx);
1591
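// For the 2-byte (non-wide) form the operand now sits in the upper half of
// rdx after the bswap, so an arithmetic shift right by 16 restores the signed
// 16-bit displacement; the wide 32-bit form is already complete.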
1592 if (!is_wide) {
1593 __ sarl(rdx, 16);
1594 }
1595 __ movl2ptr(rdx, rdx);
1596
1597 // Handle all the JSR stuff here, then exit.
1598 // It's much shorter and cleaner than intermingling with the non-JSR
1599 // normal-branch stuff occurring below.
1600 if (is_jsr) {
1601 // Pre-load the next target bytecode into rbx
1602 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));
1603
1604 // compute return address as bci in rax
1605 __ lea(rax, at_bcp((is_wide ? 5 : 3) -
1606 in_bytes(ConstMethod::codes_offset())));
1607 __ subptr(rax, Address(rcx, Method::const_offset()));
1608 // Adjust the bcp in r13 by the displacement in rdx
1609 __ addptr(r13, rdx);
1610 // jsr returns atos that is not an oop
1611 __ push_i(rax);
1612 __ dispatch_only(vtos);
1613 return;
1614 }
1615
1616 // Normal (non-jsr) branch handling
1617
1618 // Adjust the bcp in r13 by the displacement in rdx
1619 __ addptr(r13, rdx);
1620
1621 assert(UseLoopCounter || !UseOnStackReplacement,
1622 "on-stack-replacement requires loop counters");
1623 Label backedge_counter_overflow;
1624 Label profile_method;
1625 Label dispatch;
1626 if (UseLoopCounter) {
1627 // increment backedge counter for backward branches
1628 // rax: MDO
1629 // ebx: MDO bumped taken-count
1630 // rcx: method
1631 // rdx: target offset
1632 // r13: target bcp
1633 // r14: locals pointer
1634 __ testl(rdx, rdx); // check if forward or backward branch
1635 __ jcc(Assembler::positive, dispatch); // count only if backward branch
1636
1637 // check if MethodCounters exists
1638 Label has_counters;
1639 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
1640 __ testptr(rax, rax);
1641 __ jcc(Assembler::notZero, has_counters);
1642 __ push(rdx);
1643 __ push(rcx);
1644 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
1645 rcx);
1646 __ pop(rcx);
1647 __ pop(rdx);
1648 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
1649 __ jcc(Assembler::zero, dispatch);
1650 __ bind(has_counters);
1651
1652 if (TieredCompilation) {
1653 Label no_mdo;
1654 int increment = InvocationCounter::count_increment;
1655 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1656 if (ProfileInterpreter) {
1657 // Are we profiling?
1658 __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
1659 __ testptr(rbx, rbx);
1660 __ jccb(Assembler::zero, no_mdo);
1661 // Increment the MDO backedge counter
1662 const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
1663 in_bytes(InvocationCounter::counter_offset()));
1664 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
1665 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
1666 __ jmp(dispatch);
1667 }
1668 __ bind(no_mdo);
1669 // Increment backedge counter in MethodCounters*
1670 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
1671 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
1672 rax, false, Assembler::zero,
1673 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
1674 } else {
1675 // increment counter
1676 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
1677 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
1678 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1679 __ movl(Address(rcx, be_offset), rax); // store counter
1680
1681 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
1682
1683 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
1684 __ addl(rax, Address(rcx, be_offset)); // add both counters
1685
1686 if (ProfileInterpreter) {
1687 // Test to see if we should create a method data oop
1688 __ cmp32(rax,
1689 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1690 __ jcc(Assembler::less, dispatch);
1691
1692 // if no method data exists, go to profile method
1693 __ test_method_data_pointer(rax, profile_method);
1694
1695 if (UseOnStackReplacement) {
1696 // check for overflow against ebx which is the MDO taken count
1697 __ cmp32(rbx,
1698 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1699 __ jcc(Assembler::below, dispatch);
1700
1701 // When ProfileInterpreter is on, the backedge_count comes
1702 // from the MethodData*, which value does not get reset on
1703 // the call to frequency_counter_overflow(). To avoid
1704 // excessive calls to the overflow routine while the method is
1705 // being compiled, add a second test to make sure the overflow
1706 // function is called only once every overflow_frequency.
1707 const int overflow_frequency = 1024;
1708 __ andl(rbx, overflow_frequency - 1);
1709 __ jcc(Assembler::zero, backedge_counter_overflow);
1710
1711 }
1712 } else {
1713 if (UseOnStackReplacement) {
1714 // check for overflow against eax, which is the sum of the
1715 // counters
1716 __ cmp32(rax,
1717 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1718 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1719
1720 }
1721 }
1722 }
1723 __ bind(dispatch);
1724 }
1725
1726 // Pre-load the next target bytecode into rbx
1727 __ load_unsigned_byte(rbx, Address(r13, 0));
1728
1729 // continue with the bytecode @ target
1730 // eax: return bci for jsr's, unused otherwise
1731 // ebx: target bytecode
1732 // r13: target bcp
1733 __ dispatch_only(vtos);
1734
1735 if (UseLoopCounter) {
1736 if (ProfileInterpreter) {
1737 // Out-of-line code to allocate method data oop.
1738 __ bind(profile_method);
1739 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1740 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
1741 __ set_method_data_pointer_for_bcp();
1742 __ jmp(dispatch);
1743 }
1744
1745 if (UseOnStackReplacement) {
1746 // backedge counter overflow
1747 __ bind(backedge_counter_overflow);
1748 __ negptr(rdx);
1749 __ addptr(rdx, r13); // branch bcp
1750 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
1751 __ call_VM(noreg,
1752 CAST_FROM_FN_PTR(address,
1753 InterpreterRuntime::frequency_counter_overflow),
1754 rdx);
1755 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
1756
1757 // rax: osr nmethod (osr ok) or NULL (osr not possible)
1758 // ebx: target bytecode
1759 // rdx: scratch
1760 // r14: locals pointer
1761 // r13: bcp
1762 __ testptr(rax, rax); // test result
1763 __ jcc(Assembler::zero, dispatch); // no osr if null
1764 // nmethod may have been invalidated (VM may block upon call_VM return)
1765 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1766 __ cmpl(rcx, InvalidOSREntryBci);
1767 __ jcc(Assembler::equal, dispatch);
1768
1769 // We have the address of an on stack replacement routine in eax
1770 // We need to prepare to execute the OSR method. First we must
1771 // migrate the locals and monitors off of the stack.
1772
1773 __ mov(r13, rax); // save the nmethod
1774
1775 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1776
1777 // eax is OSR buffer, move it to expected parameter location
1778 __ mov(j_rarg0, rax);
1779
1780 // We use j_rarg definitions here so that registers don't conflict as parameter
1781 // registers change across platforms as we are in the midst of a calling
1782 // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.
1783
1784 const Register retaddr = j_rarg2;
1785 const Register sender_sp = j_rarg1;
1786
1787 // pop the interpreter frame
1788 __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1789 __ leave(); // remove frame anchor
1790 __ pop(retaddr); // get return address
1791 __ mov(rsp, sender_sp); // set sp to sender sp
1792 // Ensure compiled code always sees stack at proper alignment
1793 __ andptr(rsp, -(StackAlignmentInBytes));
1794
1795 // unlike x86 we need no specialized return from compiled code
1796 // to the interpreter or the call stub.
1797
1798 // push the return address
1799 __ push(retaddr);
1800
1801 // and begin the OSR nmethod
1802 __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
1803 }
1804 }
1805 }
1806
1807
1808 void TemplateTable::if_0cmp(Condition cc) {
1809 transition(itos, vtos);
1810 // assume branch is more often taken than not (loops use backward branches)
1811 Label not_taken;
1812 __ testl(rax, rax);
1813 __ jcc(j_not(cc), not_taken);
1814 branch(false, false);
1815 __ bind(not_taken);
1816 __ profile_not_taken_branch(rax);
1817 }
1818
1819 void TemplateTable::if_icmp(Condition cc) {
1820 transition(itos, vtos);
1821 // assume branch is more often taken than not (loops use backward branches)
1822 Label not_taken;
1823 __ pop_i(rdx);
1824 __ cmpl(rdx, rax);
1825 __ jcc(j_not(cc), not_taken);
1826 branch(false, false);
1827 __ bind(not_taken);
1828 __ profile_not_taken_branch(rax);
1829 }
1830
1831 void TemplateTable::if_nullcmp(Condition cc) {
1832 transition(atos, vtos);
1833 // assume branch is more often taken than not (loops use backward branches)
1834 Label not_taken;
1835 __ testptr(rax, rax);
1836 __ jcc(j_not(cc), not_taken);
1837 branch(false, false);
1838 __ bind(not_taken);
1839 __ profile_not_taken_branch(rax);
1840 }
1841
1842 void TemplateTable::if_acmp(Condition cc) {
1843 transition(atos, vtos);
1844 // assume branch is more often taken than not (loops use backward branches)
1845 Label not_taken;
1846 __ pop_ptr(rdx);
1847 __ cmpptr(rdx, rax);
1848 __ jcc(j_not(cc), not_taken);
1849 branch(false, false);
1850 __ bind(not_taken);
1851 __ profile_not_taken_branch(rax);
1852 }
1853
1854 void TemplateTable::ret() {
1855 transition(vtos, vtos);
1856 locals_index(rbx);
1857 __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
1858 __ profile_ret(rbx, rcx);
1859 __ get_method(rax);
1860 __ movptr(r13, Address(rax, Method::const_offset()));
1861 __ lea(r13, Address(r13, rbx, Address::times_1,
1862 ConstMethod::codes_offset()));
1863 __ dispatch_next(vtos);
1864 }
1865
1866 void TemplateTable::wide_ret() {
1867 transition(vtos, vtos);
1868 locals_index_wide(rbx);
1869 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
1870 __ profile_ret(rbx, rcx);
1871 __ get_method(rax);
1872 __ movptr(r13, Address(rax, Method::const_offset()));
1873 __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset()));
1874 __ dispatch_next(vtos);
1875 }
1876
1877 void TemplateTable::tableswitch() {
1878 Label default_case, continue_execution;
1879 transition(itos, vtos);
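  // Sketch of the tableswitch operand layout assumed below (per the JVM
  // spec): after 0-3 padding bytes that 4-byte-align the operands,
  //   default (4 bytes) | low (4 bytes) | high (4 bytes) | jump offsets ...
  // all stored big-endian, hence the bswapl calls. This is why lo and hi are
  // read at 1*BytesPerInt and 2*BytesPerInt below, the jump table starts at
  // 3*BytesPerInt, and the default offset sits at offset 0.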
1880 // align r13
1881 __ lea(rbx, at_bcp(BytesPerInt));
1882 __ andptr(rbx, -BytesPerInt);
1883 // load lo & hi
1884 __ movl(rcx, Address(rbx, BytesPerInt));
1885 __ movl(rdx, Address(rbx, 2 * BytesPerInt));
1886 __ bswapl(rcx);
1887 __ bswapl(rdx);
1888 // check against lo & hi
1889 __ cmpl(rax, rcx);
1890 __ jcc(Assembler::less, default_case);
1891 __ cmpl(rax, rdx);
1892 __ jcc(Assembler::greater, default_case);
1893 // lookup dispatch offset
1894 __ subl(rax, rcx);
1895 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1896 __ profile_switch_case(rax, rbx, rcx);
1897 // continue execution
1898 __ bind(continue_execution);
1899 __ bswapl(rdx);
1900 __ movl2ptr(rdx, rdx);
1901 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1902 __ addptr(r13, rdx);
1903 __ dispatch_only(vtos);
1904 // handle default
1905 __ bind(default_case);
1906 __ profile_switch_default(rax);
1907 __ movl(rdx, Address(rbx, 0));
1908 __ jmp(continue_execution);
1909 }
1910
1911 void TemplateTable::lookupswitch() {
1912 transition(itos, itos);
1913 __ stop("lookupswitch bytecode should have been rewritten");
1914 }
1915
1916 void TemplateTable::fast_linearswitch() {
1917 transition(itos, vtos);
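  // Sketch of the lookupswitch operand layout assumed below (per the JVM
  // spec): after alignment padding,
  //   default (4 bytes) | npairs (4 bytes) | npairs x (match, offset) pairs
  // Each pair is 8 bytes (4-byte match, 4-byte offset), all big-endian.
  // Hence npairs is read at BytesPerInt, pair i's match at
  // 2*BytesPerInt + 8*i, and its offset at 3*BytesPerInt + 8*i.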
1918 Label loop_entry, loop, found, continue_execution;
1919 // bswap rax so we can avoid bswapping the table entries
1920 __ bswapl(rax);
1921 // align r13
1922 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
1923 // this instruction (change offsets
1924 // below)
1925 __ andptr(rbx, -BytesPerInt);
1926 // set counter
1927 __ movl(rcx, Address(rbx, BytesPerInt));
1928 __ bswapl(rcx);
1929 __ jmpb(loop_entry);
1930 // table search
1931 __ bind(loop);
1932 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
1933 __ jcc(Assembler::equal, found);
1934 __ bind(loop_entry);
1935 __ decrementl(rcx);
1936 __ jcc(Assembler::greaterEqual, loop);
1937 // default case
1938 __ profile_switch_default(rax);
1939 __ movl(rdx, Address(rbx, 0));
1940 __ jmp(continue_execution);
1941 // entry found -> get offset
1942 __ bind(found);
1943 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
1944 __ profile_switch_case(rcx, rax, rbx);
1945 // continue execution
1946 __ bind(continue_execution);
1947 __ bswapl(rdx);
1948 __ movl2ptr(rdx, rdx);
1949 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
1950 __ addptr(r13, rdx);
1951 __ dispatch_only(vtos);
1952 }
1953
1954 void TemplateTable::fast_binaryswitch() {
1955 transition(itos, vtos);
1956 // Implementation using the following core algorithm:
1957 //
1958 // int binary_search(int key, LookupswitchPair* array, int n) {
1959 // // Binary search according to "Methodik des Programmierens" by
1960 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1961 // int i = 0;
1962 // int j = n;
1963 // while (i+1 < j) {
1964 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1965 // // with Q: for all i: 0 <= i < n: key < a[i]
1966   //     // where a stands for the array and assuming that the (nonexistent)
1967 // // element a[n] is infinitely big.
1968 // int h = (i + j) >> 1;
1969 // // i < h < j
1970 // if (key < array[h].fast_match()) {
1971 // j = h;
1972 // } else {
1973 // i = h;
1974 // }
1975 // }
1976 // // R: a[i] <= key < a[i+1] or Q
1977 // // (i.e., if key is within array, i is the correct index)
1978 // return i;
1979 // }
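  // Layout assumed for the pair array below: each LookupswitchPair occupies
  // 8 bytes (a 4-byte big-endian match followed by a 4-byte big-endian
  // offset), the pair count sits at array[-BytesPerInt] and the default
  // offset at array[-2*BytesPerInt], which is what the times_8 scaling and
  // the negative displacements rely on.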
1980
1981 // Register allocation
1982 const Register key = rax; // already set (tosca)
1983 const Register array = rbx;
1984 const Register i = rcx;
1985 const Register j = rdx;
1986 const Register h = rdi;
1987 const Register temp = rsi;
1988
1989 // Find array start
1990 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
1991 // get rid of this
1992 // instruction (change
1993 // offsets below)
1994 __ andptr(array, -BytesPerInt);
1995
1996 // Initialize i & j
1997 __ xorl(i, i); // i = 0;
1998 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
1999
2000   // Convert j into native byte ordering
2001 __ bswapl(j);
2002
2003 // And start
2004 Label entry;
2005 __ jmp(entry);
2006
2007 // binary search loop
2008 {
2009 Label loop;
2010 __ bind(loop);
2011 // int h = (i + j) >> 1;
2012 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2013 __ sarl(h, 1); // h = (i + j) >> 1;
2014 // if (key < array[h].fast_match()) {
2015 // j = h;
2016 // } else {
2017 // i = h;
2018 // }
2019 // Convert array[h].match to native byte-ordering before compare
2020 __ movl(temp, Address(array, h, Address::times_8));
2021 __ bswapl(temp);
2022 __ cmpl(key, temp);
2023 // j = h if (key < array[h].fast_match())
2024 __ cmovl(Assembler::less, j, h);
2025 // i = h if (key >= array[h].fast_match())
2026 __ cmovl(Assembler::greaterEqual, i, h);
2027 // while (i+1 < j)
2028 __ bind(entry);
2029 __ leal(h, Address(i, 1)); // i+1
2030 __ cmpl(h, j); // i+1 < j
2031 __ jcc(Assembler::less, loop);
2032 }
2033
2034 // end of binary search, result index is i (must check again!)
2035 Label default_case;
2036 // Convert array[i].match to native byte-ordering before compare
2037 __ movl(temp, Address(array, i, Address::times_8));
2038 __ bswapl(temp);
2039 __ cmpl(key, temp);
2040 __ jcc(Assembler::notEqual, default_case);
2041
2042 // entry found -> j = offset
2043 __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2044 __ profile_switch_case(i, key, array);
2045 __ bswapl(j);
2046 __ movl2ptr(j, j);
2047 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2048 __ addptr(r13, j);
2049 __ dispatch_only(vtos);
2050
2051 // default case -> j = default offset
2052 __ bind(default_case);
2053 __ profile_switch_default(i);
2054 __ movl(j, Address(array, -2 * BytesPerInt));
2055 __ bswapl(j);
2056 __ movl2ptr(j, j);
2057 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
2058 __ addptr(r13, j);
2059 __ dispatch_only(vtos);
2060 }
2061
2062
2063 void TemplateTable::_return(TosState state) {
2064 transition(state, state);
2065 assert(_desc->calls_vm(),
2066 "inconsistent calls_vm information"); // call in remove_activation
2067
2068 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2069 assert(state == vtos, "only valid state");
2070 __ movptr(c_rarg1, aaddress(0));
2071 __ load_klass(rdi, c_rarg1);
2072 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2073 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2074 Label skip_register_finalizer;
2075 __ jcc(Assembler::zero, skip_register_finalizer);
2076
2077 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
2078
2079 __ bind(skip_register_finalizer);
2080 }
2081
2082 // Narrow result if state is itos but result type is smaller.
2083 // Need to narrow in the return bytecode rather than in generate_return_entry
2084 // since compiled code callers expect the result to already be narrowed.
2085 if (state == itos) {
2086 __ narrow(rax);
2087 }
2088 __ remove_activation(state, r13);
2089
2090 __ jmp(r13);
2091 }
2092
2093 // ----------------------------------------------------------------------------
2094 // Volatile variables demand their effects be made known to all CPUs
2095 // in order. Store buffers on most chips allow reads & writes to
2096 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2097 // without some kind of memory barrier (i.e., it's not sufficient that
2098 // the interpreter does not reorder volatile references, the hardware
2099 // also must not reorder them).
2100 //
2101 // According to the new Java Memory Model (JMM):
2102 // (1) All volatiles are serialized with respect to each other.  ALSO reads &
2103 //     writes act as acquire & release, so:
2104 // (2) A read cannot let unrelated NON-volatile memory refs that
2105 // happen after the read float up to before the read. It's OK for
2106 // non-volatile memory refs that happen before the volatile read to
2107 // float down below it.
2108 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2109 // memory refs that happen BEFORE the write float down to after the
2110 // write. It's OK for non-volatile memory refs that happen after the
2111 // volatile write to float up before it.
2112 //
2113 // We only put in barriers around volatile refs (they are expensive),
2114 // not _between_ memory refs (that would require us to track the
2115 // flavor of the previous memory refs). Requirements (2) and (3)
2116 // require some barriers before volatile stores and after volatile
2117 // loads. These nearly cover requirement (1) but miss the
2118 // volatile-store-volatile-load case. This final case is placed after
2119 // volatile-stores although it could just as well go before
2120 // volatile-loads.
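//
// As a concrete illustration (a sketch, not the emitted assembly): the
// volatile-store paths in putfield_or_static and fast_storefield below do
//
//   <store the field value>
//   membar(StoreLoad | StoreStore)   // via volatile_barrier()
//
// while the load-side barriers (LoadLoad | LoadStore after volatile reads)
// are currently commented out as "[jk] not needed", relying on x86's strong
// load ordering.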
2121 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
2122 order_constraint) {
2123   // Helper function to insert a memory barrier for volatile accesses
2124 if (os::is_MP()) { // Not needed on single CPU
2125 __ membar(order_constraint);
2126 }
2127 }
2128
2129 void TemplateTable::resolve_cache_and_index(int byte_no,
2130 Register Rcache,
2131 Register index,
2132 size_t index_size) {
2133 const Register temp = rbx;
2134 assert_different_registers(Rcache, index, temp);
2135
2136 Label resolved;
2137 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2138 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2139 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2140 __ jcc(Assembler::equal, resolved);
2141
2142 // resolve first time through
2143 address entry;
2144 switch (bytecode()) {
2145 case Bytecodes::_getstatic:
2146 case Bytecodes::_putstatic:
2147 case Bytecodes::_getfield:
2148 case Bytecodes::_putfield:
2149 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
2150 break;
2151 case Bytecodes::_invokevirtual:
2152 case Bytecodes::_invokespecial:
2153 case Bytecodes::_invokestatic:
2154 case Bytecodes::_invokeinterface:
2155 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
2156 break;
2157 case Bytecodes::_invokehandle:
2158 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
2159 break;
2160 case Bytecodes::_invokedynamic:
2161 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
2162 break;
2163 default:
2164 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2165 break;
2166 }
2167 __ movl(temp, (int) bytecode());
2168 __ call_VM(noreg, entry, temp);
2169
2170 // Update registers with resolved info
2171 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2172 __ bind(resolved);
2173 }
2174
2175 // The cache and index registers must be set before the call
2176 void TemplateTable::load_field_cp_cache_entry(Register obj,
2177 Register cache,
2178 Register index,
2179 Register off,
2180 Register flags,
2181 bool is_static = false) {
2182 assert_different_registers(cache, index, flags, off);
2183
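  // Sketch of the ConstantPoolCacheEntry fields assumed here: for a field
  // entry, f2 holds the field offset in bytes, the flags word carries the
  // tos state and the is-volatile bit, and (for statics) f1 holds the field
  // holder's Klass*, from which the java mirror is loaded as the base object.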
2184 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2185 // Field offset
2186 __ movptr(off, Address(cache, index, Address::times_ptr,
2187 in_bytes(cp_base_offset +
2188 ConstantPoolCacheEntry::f2_offset())));
2189 // Flags
2190 __ movl(flags, Address(cache, index, Address::times_ptr,
2191 in_bytes(cp_base_offset +
2192 ConstantPoolCacheEntry::flags_offset())));
2193
2194 // klass overwrite register
2195 if (is_static) {
2196 __ movptr(obj, Address(cache, index, Address::times_ptr,
2197 in_bytes(cp_base_offset +
2198 ConstantPoolCacheEntry::f1_offset())));
2199 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2200 __ movptr(obj, Address(obj, mirror_offset));
2201 }
2202 }
2203
2204 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2205 Register method,
2206 Register itable_index,
2207 Register flags,
2208 bool is_invokevirtual,
2209 bool is_invokevfinal, /*unused*/
2210 bool is_invokedynamic) {
2211 // setup registers
2212 const Register cache = rcx;
2213 const Register index = rdx;
2214 assert_different_registers(method, flags);
2215 assert_different_registers(method, cache, index);
2216 assert_different_registers(itable_index, flags);
2217 assert_different_registers(itable_index, cache, index);
2218 // determine constant pool cache field offsets
2219 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
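  // Sketch of what the cache entry holds for invokes (as assumed below): for
  // invokevirtual, f2 is either a vtable index or, for vfinal calls, the
  // Method* itself; for the other invoke bytecodes f1 holds the resolved
  // Method* (or the interface Klass* for invokeinterface), and f2 may hold
  // an itable or appendix index.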
2220 const int method_offset = in_bytes(
2221 ConstantPoolCache::base_offset() +
2222 ((byte_no == f2_byte)
2223 ? ConstantPoolCacheEntry::f2_offset()
2224 : ConstantPoolCacheEntry::f1_offset()));
2225 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2226 ConstantPoolCacheEntry::flags_offset());
2227 // access constant pool cache fields
2228 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2229 ConstantPoolCacheEntry::f2_offset());
2230
2231 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2232 resolve_cache_and_index(byte_no, cache, index, index_size);
2233 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2234
2235 if (itable_index != noreg) {
2236 // pick up itable or appendix index from f2 also:
2237 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2238 }
2239 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2240 }
2241
2242 // Correct values of the cache and index registers are preserved.
2243 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2244 bool is_static, bool has_tos) {
2245 // do the JVMTI work here to avoid disturbing the register state below
2246   // We use c_rarg registers here because they are the registers used in
2247   // the call to the VM
2248 if (JvmtiExport::can_post_field_access()) {
2249 // Check to see if a field access watch has been set before we
2250 // take the time to call into the VM.
2251 Label L1;
2252 assert_different_registers(cache, index, rax);
2253 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2254 __ testl(rax, rax);
2255 __ jcc(Assembler::zero, L1);
2256
2257 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
2258
2259 // cache entry pointer
2260 __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset()));
2261 __ shll(c_rarg3, LogBytesPerWord);
2262 __ addptr(c_rarg2, c_rarg3);
2263 if (is_static) {
2264 __ xorl(c_rarg1, c_rarg1); // NULL object reference
2265 } else {
2266 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
2267 __ verify_oop(c_rarg1);
2268 }
2269 // c_rarg1: object pointer or NULL
2270 // c_rarg2: cache entry pointer
2271 // c_rarg3: jvalue object on the stack
2272 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2273 InterpreterRuntime::post_field_access),
2274 c_rarg1, c_rarg2, c_rarg3);
2275 __ get_cache_and_index_at_bcp(cache, index, 1);
2276 __ bind(L1);
2277 }
2278 }
2279
2280 void TemplateTable::pop_and_check_object(Register r) {
2281 __ pop_ptr(r);
2282 __ null_check(r); // for field access must check obj.
2283 __ verify_oop(r);
2284 }
2285
2286 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2287 transition(vtos, vtos);
2288
2289 const Register cache = rcx;
2290 const Register index = rdx;
2291 const Register obj = c_rarg3;
2292 const Register off = rbx;
2293 const Register flags = rax;
2294 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
2295
2296 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2297 jvmti_post_field_access(cache, index, is_static, false);
2298 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2299
2300 if (!is_static) {
2301 // obj is on the stack
2302 pop_and_check_object(obj);
2303 }
2304
2305 const Address field(obj, off, Address::times_1);
2306
2307 Label Done, notByte, notBool, notInt, notShort, notChar,
2308 notLong, notFloat, notObj, notDouble;
2309
2310 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2311   // Make sure we don't need to mask flags after the above shift
2312 assert(btos == 0, "change code, btos != 0");
2313
2314 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2315 __ jcc(Assembler::notZero, notByte);
2316 // btos
2317 __ load_signed_byte(rax, field);
2318 __ push(btos);
2319 // Rewrite bytecode to be faster
2320 if (!is_static) {
2321 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2322 }
2323 __ jmp(Done);
2324
2325 __ bind(notByte);
2326 __ cmpl(flags, ztos);
2327 __ jcc(Assembler::notEqual, notBool);
2328
2329 // ztos (same code as btos)
2330 __ load_signed_byte(rax, field);
2331 __ push(ztos);
2332 // Rewrite bytecode to be faster
2333 if (!is_static) {
2334 // use btos rewriting, no truncating to t/f bit is needed for getfield.
2335 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2336 }
2337 __ jmp(Done);
2338
2339 __ bind(notBool);
2340 __ cmpl(flags, atos);
2341 __ jcc(Assembler::notEqual, notObj);
2342 // atos
2343 __ load_heap_oop(rax, field);
2344 __ push(atos);
2345 if (!is_static) {
2346 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2347 }
2348 __ jmp(Done);
2349
2350 __ bind(notObj);
2351 __ cmpl(flags, itos);
2352 __ jcc(Assembler::notEqual, notInt);
2353 // itos
2354 __ movl(rax, field);
2355 __ push(itos);
2356 // Rewrite bytecode to be faster
2357 if (!is_static) {
2358 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2359 }
2360 __ jmp(Done);
2361
2362 __ bind(notInt);
2363 __ cmpl(flags, ctos);
2364 __ jcc(Assembler::notEqual, notChar);
2365 // ctos
2366 __ load_unsigned_short(rax, field);
2367 __ push(ctos);
2368 // Rewrite bytecode to be faster
2369 if (!is_static) {
2370 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2371 }
2372 __ jmp(Done);
2373
2374 __ bind(notChar);
2375 __ cmpl(flags, stos);
2376 __ jcc(Assembler::notEqual, notShort);
2377 // stos
2378 __ load_signed_short(rax, field);
2379 __ push(stos);
2380 // Rewrite bytecode to be faster
2381 if (!is_static) {
2382 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2383 }
2384 __ jmp(Done);
2385
2386 __ bind(notShort);
2387 __ cmpl(flags, ltos);
2388 __ jcc(Assembler::notEqual, notLong);
2389 // ltos
2390 __ movq(rax, field);
2391 __ push(ltos);
2392 // Rewrite bytecode to be faster
2393 if (!is_static) {
2394 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
2395 }
2396 __ jmp(Done);
2397
2398 __ bind(notLong);
2399 __ cmpl(flags, ftos);
2400 __ jcc(Assembler::notEqual, notFloat);
2401 // ftos
2402 __ movflt(xmm0, field);
2403 __ push(ftos);
2404 // Rewrite bytecode to be faster
2405 if (!is_static) {
2406 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2407 }
2408 __ jmp(Done);
2409
2410 __ bind(notFloat);
2411 #ifdef ASSERT
2412 __ cmpl(flags, dtos);
2413 __ jcc(Assembler::notEqual, notDouble);
2414 #endif
2415 // dtos
2416 __ movdbl(xmm0, field);
2417 __ push(dtos);
2418 // Rewrite bytecode to be faster
2419 if (!is_static) {
2420 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2421 }
2422 #ifdef ASSERT
2423 __ jmp(Done);
2424
2425 __ bind(notDouble);
2426 __ stop("Bad state");
2427 #endif
2428
2429 __ bind(Done);
2430 // [jk] not needed currently
2431 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2432 // Assembler::LoadStore));
2433 }
2434
2435
2436 void TemplateTable::getfield(int byte_no) {
2437 getfield_or_static(byte_no, false);
2438 }
2439
2440 void TemplateTable::getstatic(int byte_no) {
2441 getfield_or_static(byte_no, true);
2442 }
2443
2444 // The cache and index registers are expected to be set before the call.
2445 // The function may destroy various registers, just not the cache and index registers.
2446 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2447 transition(vtos, vtos);
2448
2449 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2450
2451 if (JvmtiExport::can_post_field_modification()) {
2452 // Check to see if a field modification watch has been set before
2453 // we take the time to call into the VM.
2454 Label L1;
2455 assert_different_registers(cache, index, rax);
2456 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2457 __ testl(rax, rax);
2458 __ jcc(Assembler::zero, L1);
2459
2460 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
2461
2462 if (is_static) {
2463 // Life is simple. Null out the object pointer.
2464 __ xorl(c_rarg1, c_rarg1);
2465 } else {
2466 // Life is harder. The stack holds the value on top, followed by
2467 // the object. We don't know the size of the value, though; it
2468 // could be one or two words depending on its type. As a result,
2469 // we must find the type to determine where the object is.
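      // For example (top of the expression stack first, a sketch):
      //   one-word value:  [ value ] [ objectref ]          -> obj at at_tos_p1()
      //   two-word value:  [ value, value ] [ objectref ]   -> obj at at_tos_p2()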
2470 __ movl(c_rarg3, Address(c_rarg2, rscratch1,
2471 Address::times_8,
2472 in_bytes(cp_base_offset +
2473 ConstantPoolCacheEntry::flags_offset())));
2474 __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
2475 // Make sure we don't need to mask rcx after the above shift
2476 ConstantPoolCacheEntry::verify_tos_state_shift();
2477 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
2478 __ cmpl(c_rarg3, ltos);
2479 __ cmovptr(Assembler::equal,
2480 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
2481 __ cmpl(c_rarg3, dtos);
2482 __ cmovptr(Assembler::equal,
2483 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
2484 }
2485 // cache entry pointer
2486 __ addptr(c_rarg2, in_bytes(cp_base_offset));
2487 __ shll(rscratch1, LogBytesPerWord);
2488 __ addptr(c_rarg2, rscratch1);
2489 // object (tos)
2490 __ mov(c_rarg3, rsp);
2491 // c_rarg1: object pointer set up above (NULL if static)
2492 // c_rarg2: cache entry pointer
2493 // c_rarg3: jvalue object on the stack
2494 __ call_VM(noreg,
2495 CAST_FROM_FN_PTR(address,
2496 InterpreterRuntime::post_field_modification),
2497 c_rarg1, c_rarg2, c_rarg3);
2498 __ get_cache_and_index_at_bcp(cache, index, 1);
2499 __ bind(L1);
2500 }
2501 }
2502
2503 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2504 transition(vtos, vtos);
2505
2506 const Register cache = rcx;
2507 const Register index = rdx;
2508 const Register obj = rcx;
2509 const Register off = rbx;
2510 const Register flags = rax;
2511 const Register bc = c_rarg3;
2512
2513 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2514 jvmti_post_field_mod(cache, index, is_static);
2515 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2516
2517 // [jk] not needed currently
2518 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2519 // Assembler::StoreStore));
2520
2521 Label notVolatile, Done;
2522 __ movl(rdx, flags);
2523 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2524 __ andl(rdx, 0x1);
2525
2526 // field address
2527 const Address field(obj, off, Address::times_1);
2528
2529 Label notByte, notBool, notInt, notShort, notChar,
2530 notLong, notFloat, notObj, notDouble;
2531
2532 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2533
2534 assert(btos == 0, "change code, btos != 0");
2535 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2536 __ jcc(Assembler::notZero, notByte);
2537
2538 // btos
2539 {
2540 __ pop(btos);
2541 if (!is_static) pop_and_check_object(obj);
2542 __ movb(field, rax);
2543 if (!is_static) {
2544 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
2545 }
2546 __ jmp(Done);
2547 }
2548
2549 __ bind(notByte);
2550 __ cmpl(flags, ztos);
2551 __ jcc(Assembler::notEqual, notBool);
2552
2553 // ztos
2554 {
2555 __ pop(ztos);
2556 if (!is_static) pop_and_check_object(obj);
2557 __ andl(rax, 0x1);
2558 __ movb(field, rax);
2559 if (!is_static) {
2560 patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
2561 }
2562 __ jmp(Done);
2563 }
2564
2565 __ bind(notBool);
2566 __ cmpl(flags, atos);
2567 __ jcc(Assembler::notEqual, notObj);
2568
2569 // atos
2570 {
2571 __ pop(atos);
2572 if (!is_static) pop_and_check_object(obj);
2573 // Store into the field
2574 do_oop_store(_masm, field, rax, _bs->kind(), false);
2575 if (!is_static) {
2576 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
2577 }
2578 __ jmp(Done);
2579 }
2580
2581 __ bind(notObj);
2582 __ cmpl(flags, itos);
2583 __ jcc(Assembler::notEqual, notInt);
2584
2585 // itos
2586 {
2587 __ pop(itos);
2588 if (!is_static) pop_and_check_object(obj);
2589 __ movl(field, rax);
2590 if (!is_static) {
2591 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
2592 }
2593 __ jmp(Done);
2594 }
2595
2596 __ bind(notInt);
2597 __ cmpl(flags, ctos);
2598 __ jcc(Assembler::notEqual, notChar);
2599
2600 // ctos
2601 {
2602 __ pop(ctos);
2603 if (!is_static) pop_and_check_object(obj);
2604 __ movw(field, rax);
2605 if (!is_static) {
2606 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
2607 }
2608 __ jmp(Done);
2609 }
2610
2611 __ bind(notChar);
2612 __ cmpl(flags, stos);
2613 __ jcc(Assembler::notEqual, notShort);
2614
2615 // stos
2616 {
2617 __ pop(stos);
2618 if (!is_static) pop_and_check_object(obj);
2619 __ movw(field, rax);
2620 if (!is_static) {
2621 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
2622 }
2623 __ jmp(Done);
2624 }
2625
2626 __ bind(notShort);
2627 __ cmpl(flags, ltos);
2628 __ jcc(Assembler::notEqual, notLong);
2629
2630 // ltos
2631 {
2632 __ pop(ltos);
2633 if (!is_static) pop_and_check_object(obj);
2634 __ movq(field, rax);
2635 if (!is_static) {
2636 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
2637 }
2638 __ jmp(Done);
2639 }
2640
2641 __ bind(notLong);
2642 __ cmpl(flags, ftos);
2643 __ jcc(Assembler::notEqual, notFloat);
2644
2645 // ftos
2646 {
2647 __ pop(ftos);
2648 if (!is_static) pop_and_check_object(obj);
2649 __ movflt(field, xmm0);
2650 if (!is_static) {
2651 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
2652 }
2653 __ jmp(Done);
2654 }
2655
2656 __ bind(notFloat);
2657 #ifdef ASSERT
2658 __ cmpl(flags, dtos);
2659 __ jcc(Assembler::notEqual, notDouble);
2660 #endif
2661
2662 // dtos
2663 {
2664 __ pop(dtos);
2665 if (!is_static) pop_and_check_object(obj);
2666 __ movdbl(field, xmm0);
2667 if (!is_static) {
2668 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
2669 }
2670 }
2671
2672 #ifdef ASSERT
2673 __ jmp(Done);
2674
2675 __ bind(notDouble);
2676 __ stop("Bad state");
2677 #endif
2678
2679 __ bind(Done);
2680
2681 // Check for volatile store
2682 __ testl(rdx, rdx);
2683 __ jcc(Assembler::zero, notVolatile);
2684 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2685 Assembler::StoreStore));
2686 __ bind(notVolatile);
2687 }
2688
2689 void TemplateTable::putfield(int byte_no) {
2690 putfield_or_static(byte_no, false);
2691 }
2692
2693 void TemplateTable::putstatic(int byte_no) {
2694 putfield_or_static(byte_no, true);
2695 }
2696
2697 void TemplateTable::jvmti_post_fast_field_mod() {
2698 if (JvmtiExport::can_post_field_modification()) {
2699 // Check to see if a field modification watch has been set before
2700 // we take the time to call into the VM.
2701 Label L2;
2702 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2703 __ testl(c_rarg3, c_rarg3);
2704 __ jcc(Assembler::zero, L2);
2705 __ pop_ptr(rbx); // copy the object pointer from tos
2706 __ verify_oop(rbx);
2707 __ push_ptr(rbx); // put the object pointer back on tos
2708 // Save tos values before call_VM() clobbers them. Since we have
2709 // to do it for every data type, we use the saved values as the
2710 // jvalue object.
2711 switch (bytecode()) { // load values into the jvalue object
2712 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2713 case Bytecodes::_fast_bputfield: // fall through
2714 case Bytecodes::_fast_zputfield: // fall through
2715 case Bytecodes::_fast_sputfield: // fall through
2716 case Bytecodes::_fast_cputfield: // fall through
2717 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2718 case Bytecodes::_fast_dputfield: __ push_d(); break;
2719 case Bytecodes::_fast_fputfield: __ push_f(); break;
2720 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2721
2722 default:
2723 ShouldNotReachHere();
2724 }
2725 __ mov(c_rarg3, rsp); // points to jvalue on the stack
2726 // access constant pool cache entry
2727 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
2728 __ verify_oop(rbx);
2729 // rbx: object pointer copied above
2730 // c_rarg2: cache entry pointer
2731 // c_rarg3: jvalue object on the stack
2732 __ call_VM(noreg,
2733 CAST_FROM_FN_PTR(address,
2734 InterpreterRuntime::post_field_modification),
2735 rbx, c_rarg2, c_rarg3);
2736
2737 switch (bytecode()) { // restore tos values
2738 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2739 case Bytecodes::_fast_bputfield: // fall through
2740 case Bytecodes::_fast_zputfield: // fall through
2741 case Bytecodes::_fast_sputfield: // fall through
2742 case Bytecodes::_fast_cputfield: // fall through
2743 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2744 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2745 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2746 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2747 }
2748 __ bind(L2);
2749 }
2750 }
2751
2752 void TemplateTable::fast_storefield(TosState state) {
2753 transition(state, vtos);
2754
2755 ByteSize base = ConstantPoolCache::base_offset();
2756
2757 jvmti_post_fast_field_mod();
2758
2759 // access constant pool cache
2760 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2761
2762 // test for volatile with rdx
2763 __ movl(rdx, Address(rcx, rbx, Address::times_8,
2764 in_bytes(base +
2765 ConstantPoolCacheEntry::flags_offset())));
2766
2767 // replace index with field offset from cache entry
2768 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2769 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2770
2771 // [jk] not needed currently
2772 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
2773 // Assembler::StoreStore));
2774
2775 Label notVolatile;
2776 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2777 __ andl(rdx, 0x1);
2778
2779 // Get object from stack
2780 pop_and_check_object(rcx);
2781
2782 // field address
2783 const Address field(rcx, rbx, Address::times_1);
2784
2785 // access field
2786 switch (bytecode()) {
2787 case Bytecodes::_fast_aputfield:
2788 do_oop_store(_masm, field, rax, _bs->kind(), false);
2789 break;
2790 case Bytecodes::_fast_lputfield:
2791 __ movq(field, rax);
2792 break;
2793 case Bytecodes::_fast_iputfield:
2794 __ movl(field, rax);
2795 break;
2796 case Bytecodes::_fast_zputfield:
2797 __ andl(rax, 0x1); // boolean is true if LSB is 1
2798 // fall through to bputfield
2799 case Bytecodes::_fast_bputfield:
2800 __ movb(field, rax);
2801 break;
2802 case Bytecodes::_fast_sputfield:
2803 // fall through
2804 case Bytecodes::_fast_cputfield:
2805 __ movw(field, rax);
2806 break;
2807 case Bytecodes::_fast_fputfield:
2808 __ movflt(field, xmm0);
2809 break;
2810 case Bytecodes::_fast_dputfield:
2811 __ movdbl(field, xmm0);
2812 break;
2813 default:
2814 ShouldNotReachHere();
2815 }
2816
2817 // Check for volatile store
2818 __ testl(rdx, rdx);
2819 __ jcc(Assembler::zero, notVolatile);
2820 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2821 Assembler::StoreStore));
2822 __ bind(notVolatile);
2823 }
2824
2825
2826 void TemplateTable::fast_accessfield(TosState state) {
2827 transition(atos, state);
2828
2829 // Do the JVMTI work here to avoid disturbing the register state below
2830 if (JvmtiExport::can_post_field_access()) {
2831 // Check to see if a field access watch has been set before we
2832 // take the time to call into the VM.
2833 Label L1;
2834 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2835 __ testl(rcx, rcx);
2836 __ jcc(Assembler::zero, L1);
2837 // access constant pool cache entry
2838 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
2839 __ verify_oop(rax);
2840 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2841 __ mov(c_rarg1, rax);
2842 // c_rarg1: object pointer copied above
2843 // c_rarg2: cache entry pointer
2844 __ call_VM(noreg,
2845 CAST_FROM_FN_PTR(address,
2846 InterpreterRuntime::post_field_access),
2847 c_rarg1, c_rarg2);
2848 __ pop_ptr(rax); // restore object pointer
2849 __ bind(L1);
2850 }
2851
2852 // access constant pool cache
2853 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2854 // replace index with field offset from cache entry
2855 // [jk] not needed currently
2856 // if (os::is_MP()) {
2857 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
2858 // in_bytes(ConstantPoolCache::base_offset() +
2859 // ConstantPoolCacheEntry::flags_offset())));
2860 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2861 // __ andl(rdx, 0x1);
2862 // }
2863 __ movptr(rbx, Address(rcx, rbx, Address::times_8,
2864 in_bytes(ConstantPoolCache::base_offset() +
2865 ConstantPoolCacheEntry::f2_offset())));
2866
2867 // rax: object
2868 __ verify_oop(rax);
2869 __ null_check(rax);
2870 Address field(rax, rbx, Address::times_1);
2871
2872 // access field
2873 switch (bytecode()) {
2874 case Bytecodes::_fast_agetfield:
2875 __ load_heap_oop(rax, field);
2876 __ verify_oop(rax);
2877 break;
2878 case Bytecodes::_fast_lgetfield:
2879 __ movq(rax, field);
2880 break;
2881 case Bytecodes::_fast_igetfield:
2882 __ movl(rax, field);
2883 break;
2884 case Bytecodes::_fast_bgetfield:
2885 __ movsbl(rax, field);
2886 break;
2887 case Bytecodes::_fast_sgetfield:
2888 __ load_signed_short(rax, field);
2889 break;
2890 case Bytecodes::_fast_cgetfield:
2891 __ load_unsigned_short(rax, field);
2892 break;
2893 case Bytecodes::_fast_fgetfield:
2894 __ movflt(xmm0, field);
2895 break;
2896 case Bytecodes::_fast_dgetfield:
2897 __ movdbl(xmm0, field);
2898 break;
2899 default:
2900 ShouldNotReachHere();
2901 }
2902 // [jk] not needed currently
2903 // if (os::is_MP()) {
2904 // Label notVolatile;
2905 // __ testl(rdx, rdx);
2906 // __ jcc(Assembler::zero, notVolatile);
2907 // __ membar(Assembler::LoadLoad);
2908 // __ bind(notVolatile);
2909 //};
2910 }
2911
2912 void TemplateTable::fast_xaccess(TosState state) {
2913 transition(vtos, state);
2914
2915 // get receiver
2916 __ movptr(rax, aaddress(0));
2917 // access constant pool cache
2918 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2919 __ movptr(rbx,
2920 Address(rcx, rdx, Address::times_8,
2921 in_bytes(ConstantPoolCache::base_offset() +
2922 ConstantPoolCacheEntry::f2_offset())));
2923 // make sure exception is reported in correct bcp range (getfield is
2924 // next instruction)
2925 __ increment(r13);
2926 __ null_check(rax);
2927 switch (state) {
2928 case itos:
2929 __ movl(rax, Address(rax, rbx, Address::times_1));
2930 break;
2931 case atos:
2932 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
2933 __ verify_oop(rax);
2934 break;
2935 case ftos:
2936 __ movflt(xmm0, Address(rax, rbx, Address::times_1));
2937 break;
2938 default:
2939 ShouldNotReachHere();
2940 }
2941
2942 // [jk] not needed currently
2943 // if (os::is_MP()) {
2944 // Label notVolatile;
2945 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
2946 // in_bytes(ConstantPoolCache::base_offset() +
2947 // ConstantPoolCacheEntry::flags_offset())));
2948 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2949 // __ testl(rdx, 0x1);
2950 // __ jcc(Assembler::zero, notVolatile);
2951 // __ membar(Assembler::LoadLoad);
2952 // __ bind(notVolatile);
2953 // }
2954
2955 __ decrement(r13);
2956 }
2957
2958
2959
2960 //-----------------------------------------------------------------------------
2961 // Calls
2962
2963 void TemplateTable::count_calls(Register method, Register temp) {
2964 // implemented elsewhere
2965 ShouldNotReachHere();
2966 }
2967
2968 void TemplateTable::prepare_invoke(int byte_no,
2969 Register method, // linked method (or i-klass)
2970 Register index, // itable index, MethodType, etc.
2971 Register recv, // if caller wants to see it
2972 Register flags // if caller wants to test it
2973 ) {
2974 // determine flags
2975 const Bytecodes::Code code = bytecode();
2976 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2977 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2978 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2979 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2980 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2981 const bool load_receiver = (recv != noreg);
2982 const bool save_flags = (flags != noreg);
2983 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2984 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2985 assert(flags == noreg || flags == rdx, "");
2986 assert(recv == noreg || recv == rcx, "");
2987
2988 // setup registers & access constant pool cache
2989 if (recv == noreg) recv = rcx;
2990 if (flags == noreg) flags = rdx;
2991 assert_different_registers(method, index, recv, flags);
2992
2993 // save 'interpreter return address'
2994 __ save_bcp();
2995
2996 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2997
2998 // maybe push appendix to arguments (just before return address)
2999 if (is_invokedynamic || is_invokehandle) {
3000 Label L_no_push;
3001 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
3002 __ jcc(Assembler::zero, L_no_push);
3003 // Push the appendix as a trailing parameter.
3004 // This must be done before we get the receiver,
3005 // since the parameter_size includes it.
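    // Sketch of the outgoing arguments after this push (top of stack first):
    //   [ appendix ] [ arg_n-1 ] ... [ arg_0 / receiver ]
    // parameter_size (read from flags below) counts the appendix, so the
    // receiver, if any, is still the deepest argument slot.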
3006 __ push(rbx);
3007 __ mov(rbx, index);
3008 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
3009 __ load_resolved_reference_at_index(index, rbx);
3010 __ pop(rbx);
3011 __ push(index); // push appendix (MethodType, CallSite, etc.)
3012 __ bind(L_no_push);
3013 }
3014
3015 // load receiver if needed (after appendix is pushed so parameter size is correct)
3016 // Note: no return address pushed yet
3017 if (load_receiver) {
3018 __ movl(recv, flags);
3019 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
3020 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
3021 const int receiver_is_at_end = -1; // back off one slot to get receiver
3022 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3023 __ movptr(recv, recv_addr);
3024 __ verify_oop(recv);
3025 }
3026
3027 if (save_flags) {
3028 __ movl(r13, flags);
3029 }
3030
3031 // compute return type
3032 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3033 // Make sure we don't need to mask flags after the above shift
3034 ConstantPoolCacheEntry::verify_tos_state_shift();
3035 // load return address
3036 {
3037 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3038 ExternalAddress table(table_addr);
3039 __ lea(rscratch1, table);
3040 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
3041 }
3042
3043 // push return address
3044 __ push(flags);
3045
3046   // Restore the flags value saved in r13 above, and restore r13
3047   // (the bytecode pointer) for later null checks.
3048 if (save_flags) {
3049 __ movl(flags, r13);
3050 __ restore_bcp();
3051 }
3052 }
3053
3054
3055 void TemplateTable::invokevirtual_helper(Register index,
3056 Register recv,
3057 Register flags) {
3058 // Uses temporary registers rax, rdx
3059 assert_different_registers(index, recv, rax, rdx);
3060 assert(index == rbx, "");
3061 assert(recv == rcx, "");
3062
3063 // Test for an invoke of a final method
3064 Label notFinal;
3065 __ movl(rax, flags);
3066 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3067 __ jcc(Assembler::zero, notFinal);
3068
3069 const Register method = index; // method must be rbx
3070 assert(method == rbx,
3071 "Method* must be rbx for interpreter calling convention");
3072
3073 // do the call - the index is actually the method to call
3074 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3075
3076 // It's final, need a null check here!
3077 __ null_check(recv);
3078
3079 // profile this call
3080 __ profile_final_call(rax);
3081 __ profile_arguments_type(rax, method, r13, true);
3082
3083 __ jump_from_interpreted(method, rax);
3084
3085 __ bind(notFinal);
3086
3087 // get receiver klass
3088 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3089 __ load_klass(rax, recv);
3090
3091 // profile this call
3092 __ profile_virtual_call(rax, r14, rdx);
3093
3094 // get target Method* & entry point
3095 __ lookup_virtual_method(rax, index, method);
3096 __ profile_arguments_type(rdx, method, r13, true);
3097 __ jump_from_interpreted(method, rdx);
3098 }
3099
3100
3101 void TemplateTable::invokevirtual(int byte_no) {
3102 transition(vtos, vtos);
3103 assert(byte_no == f2_byte, "use this argument");
3104 prepare_invoke(byte_no,
3105 rbx, // method or vtable index
3106 noreg, // unused itable index
3107 rcx, rdx); // recv, flags
3108
3109 // rbx: index
3110 // rcx: receiver
3111 // rdx: flags
3112
3113 invokevirtual_helper(rbx, rcx, rdx);
3114 }
3115
3116
3117 void TemplateTable::invokespecial(int byte_no) {
3118 transition(vtos, vtos);
3119 assert(byte_no == f1_byte, "use this argument");
3120 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3121 rcx); // get receiver also for null check
3122 __ verify_oop(rcx);
3123 __ null_check(rcx);
3124 // do the call
3125 __ profile_call(rax);
3126 __ profile_arguments_type(rax, rbx, r13, false);
3127 __ jump_from_interpreted(rbx, rax);
3128 }
3129
3130
3131 void TemplateTable::invokestatic(int byte_no) {
3132 transition(vtos, vtos);
3133 assert(byte_no == f1_byte, "use this argument");
3134 prepare_invoke(byte_no, rbx); // get f1 Method*
3135 // do the call
3136 __ profile_call(rax);
3137 __ profile_arguments_type(rax, rbx, r13, false);
3138 __ jump_from_interpreted(rbx, rax);
3139 }
3140
3141 void TemplateTable::fast_invokevfinal(int byte_no) {
3142 transition(vtos, vtos);
3143 assert(byte_no == f2_byte, "use this argument");
3144 __ stop("fast_invokevfinal not used on amd64");
3145 }
3146
3147 void TemplateTable::invokeinterface(int byte_no) {
3148 transition(vtos, vtos);
3149 assert(byte_no == f1_byte, "use this argument");
3150 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 Method*
3151 rcx, rdx); // recv, flags
3152
3153 // rax: reference klass (from f1)
3154 // rbx: method (from f2)
3155 // rcx: receiver
3156 // rdx: flags
3157
3158 // Special case of invokeinterface called for virtual method of
3159 // java.lang.Object. See cpCacheOop.cpp for details.
3160 // This code isn't produced by javac, but could be produced by
3161 // another compliant java compiler.
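  // For example, an invokeinterface that resolves to Object.hashCode()
  // (legal bytecode, though javac emits invokevirtual for it) has the
  // is_forced_virtual flag set at resolution time and is dispatched as an
  // ordinary virtual call through invokevirtual_helper below.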
3162 Label notMethod;
3163 __ movl(r14, rdx);
3164 __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3165 __ jcc(Assembler::zero, notMethod);
3166
3167 invokevirtual_helper(rbx, rcx, rdx);
3168 __ bind(notMethod);
3169
3170 // Get receiver klass into rdx - also a null check
3171 __ restore_locals(); // restore r14
3172 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3173 __ load_klass(rdx, rcx);
3174
3175 Label no_such_interface, no_such_method;
3176
3177 // Receiver subtype check against REFC.
3178 // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
3179 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3180 rdx, rax, noreg,
3181 // outputs: scan temp. reg, scan temp. reg
3182 r13, r14,
3183 no_such_interface,
3184 /*return_method=*/false);
3185
3186 // profile this call
3187 __ restore_bcp(); // rbcp was destroyed by receiver type check
3188 __ profile_virtual_call(rdx, r13, r14);
3189
3190 // Get declaring interface class from method, and itable index
3191 __ movptr(rax, Address(rbx, Method::const_offset()));
3192 __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
3193 __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
3194 __ movl(rbx, Address(rbx, Method::itable_index_offset()));
3195 __ subl(rbx, Method::itable_index_max);
3196 __ negl(rbx);
3197
3198 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3199 rdx, rax, rbx,
3200 // outputs: method, scan temp. reg
3201 rbx, r13,
3202 no_such_interface);
3203
3204 // rbx: Method* to call
3205 // rcx: receiver
3206 // Check for abstract method error
3207 // Note: This should be done more efficiently via a throw_abstract_method_error
3208 // interpreter entry point and a conditional jump to it in case of a null
3209 // method.
3210 __ testptr(rbx, rbx);
3211 __ jcc(Assembler::zero, no_such_method);
3212
3213 __ profile_arguments_type(rdx, rbx, r13, true);
3214
3215 // do the call
3216 // rcx: receiver
3217 // rbx,: Method*
3218 __ jump_from_interpreted(rbx, rdx);
3219 __ should_not_reach_here();
3220
3221 // exception handling code follows...
3222 // note: must restore interpreter registers to canonical
3223 // state for exception handling to work correctly!
3224
3225 __ bind(no_such_method);
3226 // throw exception
3227 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3228 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3229 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3230 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3231 // the call_VM checks for exception, so we should never return here.
3232 __ should_not_reach_here();
3233
3234 __ bind(no_such_interface);
3235 // throw exception
3236 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3237 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
3238 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3239 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3240 InterpreterRuntime::throw_IncompatibleClassChangeError));
3241 // the call_VM checks for exception, so we should never return here.
3242 __ should_not_reach_here();
3243 }
3244
3245
3246 void TemplateTable::invokehandle(int byte_no) {
3247 transition(vtos, vtos);
3248 assert(byte_no == f1_byte, "use this argument");
3249 const Register rbx_method = rbx;
3250 const Register rax_mtype = rax;
3251 const Register rcx_recv = rcx;
3252 const Register rdx_flags = rdx;
3253
3254 if (!EnableInvokeDynamic) {
3255 // rewriter does not generate this bytecode
3256 __ should_not_reach_here();
3257 return;
3258 }
3259
3260 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3261 __ verify_method_ptr(rbx_method);
3262 __ verify_oop(rcx_recv);
3263 __ null_check(rcx_recv);
3264
3265 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3266 // rbx: MH.invokeExact_MT method (from f2)
3267
3268 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3269
3270 // FIXME: profile the LambdaForm also
3271 __ profile_final_call(rax);
3272 __ profile_arguments_type(rdx, rbx_method, r13, true);
3273
3274 __ jump_from_interpreted(rbx_method, rdx);
3275 }
3276
3277
3278 void TemplateTable::invokedynamic(int byte_no) {
3279 transition(vtos, vtos);
3280 assert(byte_no == f1_byte, "use this argument");
3281
3282 if (!EnableInvokeDynamic) {
3283 // We should not encounter this bytecode if !EnableInvokeDynamic.
3284 // The verifier will stop it. However, if we get past the verifier,
3285 // this will stop the thread in a reasonable way, without crashing the JVM.
3286 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3287 InterpreterRuntime::throw_IncompatibleClassChangeError));
3288 // the call_VM checks for exception, so we should never return here.
3289 __ should_not_reach_here();
3290 return;
3291 }
3292
3293 const Register rbx_method = rbx;
3294 const Register rax_callsite = rax;
3295
3296 prepare_invoke(byte_no, rbx_method, rax_callsite);
3297
3298 // rax: CallSite object (from cpool->resolved_references[f1])
3299 // rbx: MH.linkToCallSite method (from f2)
3300
3301 // Note: rax_callsite is already pushed by prepare_invoke
3302
3303 // %%% should make a type profile for any invokedynamic that takes a ref argument
3304 // profile this call
3305 __ profile_call(r13);
3306 __ profile_arguments_type(rdx, rbx_method, r13, false);
3307
3308 __ verify_oop(rax_callsite);
3309
3310 __ jump_from_interpreted(rbx_method, rdx);
3311 }
3312
3313
3314 //-----------------------------------------------------------------------------
3315 // Allocation
3316
3317 void TemplateTable::_new() {
3318 transition(vtos, atos);
3319 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3320 Label slow_case;
3321 Label done;
3322 Label initialize_header;
3323 Label initialize_object; // including clearing the fields
3324 Label allocate_shared;
3325
3326 __ get_cpool_and_tags(rsi, rax);
3327 // Make sure the class we're about to instantiate has been resolved.
3328   // This is done before loading the InstanceKlass to be consistent with the
3329   // order in which the constant pool is updated (see ConstantPool::klass_at_put).
3330 const int tags_offset = Array<u1>::base_offset_in_bytes();
3331 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
3332 JVM_CONSTANT_Class);
3333 __ jcc(Assembler::notEqual, slow_case);
3334
3335 // get InstanceKlass
3336 __ movptr(rsi, Address(rsi, rdx,
3337 Address::times_8, sizeof(ConstantPool)));
3338
3339 // make sure klass is initialized & doesn't have finalizer
3340 // make sure klass is fully initialized
3341 __ cmpb(Address(rsi,
3342 InstanceKlass::init_state_offset()),
3343 InstanceKlass::fully_initialized);
3344 __ jcc(Assembler::notEqual, slow_case);
3345
3346 // get instance_size in InstanceKlass (scaled to a count of bytes)
3347 __ movl(rdx,
3348 Address(rsi,
3349 Klass::layout_helper_offset()));
3350 // test to see if it has a finalizer or is malformed in some way
3351 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3352 __ jcc(Assembler::notZero, slow_case);
3353
3354 // Allocate the instance
3355 // 1) Try to allocate in the TLAB
3356 // 2) if fail and the object is large allocate in the shared Eden
3357 // 3) if the above fails (or is not applicable), go to a slow case
3358 // (creates a new TLAB, etc.)
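  // Roughly, in pseudo-C (a sketch of the fast paths, not the emitted code):
  //   obj = thread->tlab_top; end = obj + size;
  //   if (end <= thread->tlab_end) { thread->tlab_top = end; goto init; }
  //   if (allow_shared_alloc) {
  //     do { obj = *heap_top; end = obj + size;
  //          if (end > *heap_end) goto slow_case;
  //     } while (!CAS(heap_top, obj, end));        // lock cmpxchgptr below
  //     goto init;
  //   }
  //   goto slow_case;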
3359
3360 const bool allow_shared_alloc =
3361 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3362
3363 if (UseTLAB) {
3364 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
3365 __ lea(rbx, Address(rax, rdx, Address::times_1));
3366 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
3367 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3368 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3369 if (ZeroTLAB) {
3370 // the fields have been already cleared
3371 __ jmp(initialize_header);
3372 } else {
3373 // initialize both the header and fields
3374 __ jmp(initialize_object);
3375 }
3376 }
3377
3378 // Allocation in the shared Eden, if allowed.
3379 //
3380 // rdx: instance size in bytes
3381 if (allow_shared_alloc) {
3382 __ bind(allocate_shared);
3383
3384 ExternalAddress top((address)Universe::heap()->top_addr());
3385 ExternalAddress end((address)Universe::heap()->end_addr());
3386
3387 const Register RtopAddr = rscratch1;
3388 const Register RendAddr = rscratch2;
3389
3390 __ lea(RtopAddr, top);
3391 __ lea(RendAddr, end);
3392 __ movptr(rax, Address(RtopAddr, 0));
3393
3394 // For retries rax gets set by cmpxchgq
3395 Label retry;
3396 __ bind(retry);
3397 __ lea(rbx, Address(rax, rdx, Address::times_1));
3398 __ cmpptr(rbx, Address(RendAddr, 0));
3399 __ jcc(Assembler::above, slow_case);
3400
3401     // Compare rax with the current top; if they are still equal, store the
3402     // new top (rbx) through the top-address pointer. Sets ZF if they were
3403     // equal, and clears it otherwise. Use the lock prefix for atomicity on MPs.
3404 //
3405 // rax: object begin
3406 // rbx: object end
3407 // rdx: instance size in bytes
3408 if (os::is_MP()) {
3409 __ lock();
3410 }
3411 __ cmpxchgptr(rbx, Address(RtopAddr, 0));
3412
3413 // if someone beat us on the allocation, try again, otherwise continue
3414 __ jcc(Assembler::notEqual, retry);
3415
3416 __ incr_allocated_bytes(r15_thread, rdx, 0);
3417 }
3418
3419 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3420 // The object is initialized before the header. If the object size is
3421 // zero, go directly to the header initialization.
3422 __ bind(initialize_object);
3423 __ decrementl(rdx, sizeof(oopDesc));
3424 __ jcc(Assembler::zero, initialize_header);
3425
3426 // Initialize object fields
3427 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3428 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
3429 {
3430 Label loop;
3431 __ bind(loop);
3432 __ movq(Address(rax, rdx, Address::times_8,
3433 sizeof(oopDesc) - oopSize),
3434 rcx);
3435 __ decrementl(rdx);
3436 __ jcc(Assembler::notZero, loop);
3437 }
3438
3439 // initialize object header only.
3440 __ bind(initialize_header);
3441 if (UseBiasedLocking) {
3442 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
3443 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
3444 } else {
3445 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
3446 (intptr_t) markOopDesc::prototype()); // header (address 0x1)
3447 }
3448 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3449 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
3450 __ store_klass(rax, rsi); // store klass last
3451
3452 {
3453 SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
3454 // Trigger dtrace event for fastpath
3455 __ push(atos); // save the return value
3456 __ call_VM_leaf(
3457 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3458 __ pop(atos); // restore the return value
3459
3460 }
3461 __ jmp(done);
3462 }
3463
3464
3465 // slow case
3466 __ bind(slow_case);
3467 __ get_constant_pool(c_rarg1);
3468 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3469 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
3470 __ verify_oop(rax);
3471
3472 // continue
3473 __ bind(done);
3474 }
3475
3476 void TemplateTable::newarray() {
3477 transition(itos, atos);
3478 __ load_unsigned_byte(c_rarg1, at_bcp(1));
3479 __ movl(c_rarg2, rax);
3480 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3481 c_rarg1, c_rarg2);
3482 }
3483
3484 void TemplateTable::anewarray() {
3485 transition(itos, atos);
3486 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
3487 __ get_constant_pool(c_rarg1);
3488 __ movl(c_rarg3, rax);
3489 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3490 c_rarg1, c_rarg2, c_rarg3);
3491 }
3492
3493 void TemplateTable::arraylength() {
3494 transition(atos, itos);
3495 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3496 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3497 }
3498
3499 void TemplateTable::checkcast() {
3500 transition(atos, atos);
3501 Label done, is_null, ok_is_subtype, quicked, resolved;
3502 __ testptr(rax, rax); // object is in rax
3503 __ jcc(Assembler::zero, is_null);
3504
3505 // Get cpool & tags index
3506 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3507 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3508 // See if bytecode has already been quicked
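// A resolved ("quickened") entry has the tag JVM_CONSTANT_Class, in which
// case the Klass* can be loaded directly from the constant pool below;
// otherwise call into the runtime to resolve it first.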
3509 __ cmpb(Address(rdx, rbx,
3510 Address::times_1,
3511 Array<u1>::base_offset_in_bytes()),
3512 JVM_CONSTANT_Class);
3513 __ jcc(Assembler::equal, quicked);
3514 __ push(atos); // save receiver for result, and for GC
3515 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3516 // vm_result_2 has metadata result
3517 __ get_vm_result_2(rax, r15_thread);
3518 __ pop_ptr(rdx); // restore receiver
3519 __ jmpb(resolved);
3520
3521 // Get superklass in rax and subklass in rbx
3522 __ bind(quicked);
3523 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
3524 __ movptr(rax, Address(rcx, rbx,
3525 Address::times_8, sizeof(ConstantPool)));
3526
3527 __ bind(resolved);
3528 __ load_klass(rbx, rdx);
3529
3530 // Generate subtype check. Blows rcx, rdi. Object in rdx.
3531 // Superklass in rax. Subklass in rbx.
3532 __ gen_subtype_check(rbx, ok_is_subtype);
3533
3534 // Come here on failure
3535 __ push_ptr(rdx);
3536 // object is at TOS
3537 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3538
3539 // Come here on success
3540 __ bind(ok_is_subtype);
3541 __ mov(rax, rdx); // Restore the object from rdx into rax
3542
3543 // Collect counts on whether this check-cast sees NULLs a lot or not.
3544 if (ProfileInterpreter) {
3545 __ jmp(done);
3546 __ bind(is_null);
3547 __ profile_null_seen(rcx);
3548 } else {
3549 __ bind(is_null); // same as 'done'
3550 }
3551 __ bind(done);
3552 }
3553
3554 void TemplateTable::instanceof() {
3555 transition(atos, itos);
3556 Label done, is_null, ok_is_subtype, quicked, resolved;
3557 __ testptr(rax, rax);
3558 __ jcc(Assembler::zero, is_null);
3559
3560 // Get cpool & tags index
3561 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
3562 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
3563 // See if bytecode has already been quicked
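// Same quickening test as in checkcast() above: a resolved entry has the tag
// JVM_CONSTANT_Class and lets us load the Klass* straight from the constant
// pool.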
3564 __ cmpb(Address(rdx, rbx,
3565 Address::times_1,
3566 Array<u1>::base_offset_in_bytes()),
3567 JVM_CONSTANT_Class);
3568 __ jcc(Assembler::equal, quicked);
3569
3570 __ push(atos); // save receiver for result, and for GC
3571 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3572 // vm_result_2 has metadata result
3573 __ get_vm_result_2(rax, r15_thread);
3574 __ pop_ptr(rdx); // restore receiver
3575 __ verify_oop(rdx);
3576 __ load_klass(rdx, rdx);
3577 __ jmpb(resolved);
3578
3579 // Get superklass in rax and subklass in rdx
3580 __ bind(quicked);
3581 __ load_klass(rdx, rax);
3582 __ movptr(rax, Address(rcx, rbx,
3583 Address::times_8, sizeof(ConstantPool)));
3584
3585 __ bind(resolved);
3586
3587 // Generate subtype check. Blows rcx, rdi
3588 // Superklass in rax. Subklass in rdx.
3589 __ gen_subtype_check(rdx, ok_is_subtype);
3590
3591 // Come here on failure
3592 __ xorl(rax, rax);
3593 __ jmpb(done);
3594 // Come here on success
3595 __ bind(ok_is_subtype);
3596 __ movl(rax, 1);
3597
3598 // Collect counts on whether this test sees NULLs a lot or not.
3599 if (ProfileInterpreter) {
3600 __ jmp(done);
3601 __ bind(is_null);
3602 __ profile_null_seen(rcx);
3603 } else {
3604 __ bind(is_null); // same as 'done'
3605 }
3606 __ bind(done);
3607 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
3608 // rax = 1: obj != NULL and obj is an instanceof the specified klass
3609 }
3610
3611 //-----------------------------------------------------------------------------
3612 // Breakpoints
3613 void TemplateTable::_breakpoint() {
3614 // Note: We get here even if we are single stepping.
3615 // jbug insists on setting breakpoints at every bytecode
3616 // even if we are in single step mode.
3617
3618 transition(vtos, vtos);
3619
3620 // get the unpatched byte code
3621 __ get_method(c_rarg1);
3622 __ call_VM(noreg,
3623 CAST_FROM_FN_PTR(address,
3624 InterpreterRuntime::get_original_bytecode_at),
3625 c_rarg1, r13);
3626 __ mov(rbx, rax);
3627
3628 // post the breakpoint event
3629 __ get_method(c_rarg1);
3630 __ call_VM(noreg,
3631 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
3632 c_rarg1, r13);
3633
3634 // complete the execution of original bytecode
3635 __ dispatch_only_normal(vtos);
3636 }
3637
3638 //-----------------------------------------------------------------------------
3639 // Exceptions
3640
3641 void TemplateTable::athrow() {
3642 transition(atos, vtos);
3643 __ null_check(rax);
3644 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3645 }
3646
3647 //-----------------------------------------------------------------------------
3648 // Synchronization
3649 //
3650 // Note: monitorenter & exit are symmetric routines, which is reflected
3651 // in the assembly code structure as well
3652 //
3653 // Stack layout:
3654 //
3655 // [expressions ] <--- rsp = expression stack top
3656 // ..
3657 // [expressions ]
3658 // [monitor entry] <--- monitor block top = expression stack bot
3659 // ..
3660 // [monitor entry]
3661 // [frame data ] <--- monitor block bot
3662 // ...
3663 // [saved rbp ] <--- rbp
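//
// Each monitor entry is a BasicObjectLock: a BasicLock holding the displaced
// mark word, followed by the oop being locked
// (BasicObjectLock::obj_offset_in_bytes() below).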
3664 void TemplateTable::monitorenter() {
3665 transition(atos, vtos);
3666
3667 // check for NULL object
3668 __ null_check(rax);
3669
3670 const Address monitor_block_top(
3671 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3672 const Address monitor_block_bot(
3673 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3674 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3675
3676 Label allocated;
3677
3678 // initialize entry pointer
3679 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL
3680
3681 // find a free slot in the monitor block (result in c_rarg1)
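// Walk the block from the top-most entry towards the bottom, remembering an
// unused entry (obj == NULL) in c_rarg1 and stopping early if an entry for
// this very object is found.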
3682 {
3683 Label entry, loop, exit;
3684 __ movptr(c_rarg3, monitor_block_top); // points to current entry,
3685 // starting with top-most entry
3686 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3687 // of monitor block
3688 __ jmpb(entry);
3689
3690 __ bind(loop);
3691 // check if current entry is used
3692 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
3693 // if not used then remember entry in c_rarg1
3694 __ cmov(Assembler::equal, c_rarg1, c_rarg3);
3695 // check if current entry is for same object
3696 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
3697 // if same object then stop searching
3698 __ jccb(Assembler::equal, exit);
3699 // otherwise advance to next entry
3700 __ addptr(c_rarg3, entry_size);
3701 __ bind(entry);
3702 // check if bottom reached
3703 __ cmpptr(c_rarg3, c_rarg2);
3704 // if not at bottom then check this entry
3705 __ jcc(Assembler::notEqual, loop);
3706 __ bind(exit);
3707 }
3708
3709 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
3710 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
3711
3712 // allocate one if there's no free slot
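// No free slot was found: carve out a new entry by moving rsp and the saved
// expression stack bottom down by entry_size and copying the expression stack
// words down, which leaves c_rarg1 pointing at the newly created entry.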
3713 {
3714 Label entry, loop;
3715 // 1. compute new pointers // rsp: old expression stack top
3716 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
3717 __ subptr(rsp, entry_size); // move expression stack top
3718 __ subptr(c_rarg1, entry_size); // move expression stack bottom
3719 __ mov(c_rarg3, rsp); // set start value for copy loop
3720 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
3721 __ jmp(entry);
3722 // 2. move expression stack contents
3723 __ bind(loop);
3724 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
3725 // word from old location
3726 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
3727 __ addptr(c_rarg3, wordSize); // advance to next word
3728 __ bind(entry);
3729 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
3730 __ jcc(Assembler::notEqual, loop); // if not at bottom then
3731 // copy next word
3732 }
3733
3734 // call run-time routine
3735 // c_rarg1: points to monitor entry
3736 __ bind(allocated);
3737
3738 // Increment bcp to point to the next bytecode, so exception
3739 // handling for async. exceptions works correctly.
3740 // The object has already been popped from the stack, so the
3741 // expression stack looks correct.
3742 __ increment(r13);
3743
3744 // store object
3745 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
3746 __ lock_object(c_rarg1);
3747
3748 // check to make sure this monitor doesn't cause stack overflow after locking
3749 __ save_bcp(); // in case of exception
3750 __ generate_stack_overflow_check(0);
3751
3752 // The bcp has already been incremented. Just need to dispatch to
3753 // next instruction.
3754 __ dispatch_next(vtos);
3755 }
3756
3757
3758 void TemplateTable::monitorexit() {
3759 transition(atos, vtos);
3760
3761 // check for NULL object
3762 __ null_check(rax);
3763
3764 const Address monitor_block_top(
3765 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3766 const Address monitor_block_bot(
3767 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3768 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
3769
3770 Label found;
3771
3772 // find matching slot
3773 {
3774 Label entry, loop;
3775 __ movptr(c_rarg1, monitor_block_top); // points to current entry,
3776 // starting with top-most entry
3777 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
3778 // of monitor block
3779 __ jmpb(entry);
3780
3781 __ bind(loop);
3782 // check if current entry is for same object
3783 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
3784 // if same object then stop searching
3785 __ jcc(Assembler::equal, found);
3786 // otherwise advance to next entry
3787 __ addptr(c_rarg1, entry_size);
3788 __ bind(entry);
3789 // check if bottom reached
3790 __ cmpptr(c_rarg1, c_rarg2);
3791 // if not at bottom then check this entry
3792 __ jcc(Assembler::notEqual, loop);
3793 }
3794
3795 // Error handling: unlocking was not block-structured
3796 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3797 InterpreterRuntime::throw_illegal_monitor_state_exception));
3798 __ should_not_reach_here();
3799
3800 // call run-time routine
3801 // c_rarg1: points to monitor entry
3802 __ bind(found);
3803 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3804 __ unlock_object(c_rarg1);
3805 __ pop_ptr(rax); // discard object
3806 }
3807
3808
3809 // Wide instructions
3810 void TemplateTable::wide() {
3811 transition(vtos, vtos);
3812 __ load_unsigned_byte(rbx, at_bcp(1));
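// rbx now holds the opcode that follows the wide prefix; use it to index the
// table of wide-bytecode entry points and dispatch there.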
3813 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
3814 __ jmp(Address(rscratch1, rbx, Address::times_8));
3815 // Note: the r13 increment step is part of the individual wide
3816 // bytecode implementations
3817 }
3818
3819
3820 // Multi arrays
3821 void TemplateTable::multianewarray() {
3822 transition(vtos, atos);
3823 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3824 // last dim is on top of stack; we want address of first one:
3825 // first_addr = last_addr + (ndims - 1) * wordSize
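// e.g. for ndims == 2 this yields c_rarg1 = rsp + wordSize, the expression
// stack slot holding the first (outer-most) dimension.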
3826 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
3827 call_VM(rax,
3828 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
3829 c_rarg1);
3830 __ load_unsigned_byte(rbx, at_bcp(3));
3831 __ lea(rsp, Address(rsp, rbx, Address::times_8));
3832 }
3833 #endif // !CC_INTERP
3834