/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

// Platform-dependent initialization
void TemplateTable::pd_initialize() {
  // No x86 specific initialization
}

// Address Computation: local variables
static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

#ifndef _LP64
static inline Address haddress(int n) {
  return iaddress(n + 0);
}
#endif

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::times_ptr);
}

static inline Address laddress(Register r) {
  return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
}

#ifndef _LP64
static inline Address haddress(Register r) {
  return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}
#endif

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}


// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At top of Java expression stack which may be different than esp(). It
// isn't for category 1 objects.
static inline Address at_tos() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
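
// Usage sketch (assumed, mirroring the conditional-branch templates defined
// later in this file): a branch template tests the condition and skips the
// branch code when the condition does NOT hold, e.g.
//
//   Label not_taken;
//   __ testl(rax, rax);
//   __ jcc(j_not(cc), not_taken);  // taken path falls through
//   branch(false, false);
//   __ bind(not_taken);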



// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by dst.
// If val == noreg this means store a NULL


static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators = 0) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  __ store_heap_oop(dst, val, rdx, rbx, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators = 0) {
  __ load_heap_oop(dst, src, rdx, rbx, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
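
// Illustrative sketch (bytecode names from this file; the stream layout is
// assumed): quickening overwrites the generic instruction in the method's
// bytecode stream with its _fast variant once resolution has happened, e.g.
//
//   before:  aload_0, getfield #2        // resolves the field on first run
//   after:   aload_0, fast_agetfield #2  // later runs skip resolution
//
// The movb(at_bcp(0), bc_reg) above is that one-byte overwrite, guarded by
// the breakpoint and putfield checks.
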
// Individual instructions


void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
#ifndef _LP64
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
#endif
}



void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if (UseSSE >= 1) {
    static float one = 1.0f, two = 2.0f;
    switch (value) {
    case 0:
      __ xorps(xmm0, xmm0);
      break;
    case 1:
      __ movflt(xmm0, ExternalAddress((address) &one));
      break;
    case 2:
      __ movflt(xmm0, ExternalAddress((address) &two));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    if        (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should find a better solution here
    } else                 { ShouldNotReachHere();
    }
#endif // _LP64
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if (UseSSE >= 2) {
    static double one = 1.0;
    switch (value) {
    case 0:
      __ xorpd(xmm0, xmm0);
      break;
    case 1:
      __ movdbl(xmm0, ExternalAddress((address) &one));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    if        (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else                 { ShouldNotReachHere();
    }
#endif
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
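
// Worked example for the decode above: for `sipush` with operand bytes
// 0xFF 0xFE (big-endian immediate 0xFFFE = -2), the little-endian 16-bit
// load yields rax = 0x0000FEFF, bswapl gives 0xFFFE0000, and the arithmetic
// shift right by 16 produces 0xFFFFFFFE = -2, the sign-extended immediate.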

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);

  __ movl(rarg, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);

  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);

  // ftos
  __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jccb(Assembler::notEqual, notInt);

  // itos
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);

  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testptr(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);
  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;
    ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
    __ movptr(tmp, null_sentinel);
    __ cmpoop(tmp, result);
    __ jccb(Assembler::notEqual, notNull);
    __ xorptr(result, result);  // NULL object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
  __ cmpl(rdx, JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, notDouble);

  // dtos
  __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);

  __ jmp(Done);
  __ bind(notDouble);
  __ cmpl(rdx, JVM_CONSTANT_Long);
  __ jccb(Assembler::notEqual, notLong);

  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
  __ push(ltos);
  __ jmp(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done) {
  const Register obj = rax;
  const Register off = rbx;
  const Register flags = rcx;
  const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  __ movl(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(flags, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(flags, r15_thread);
#endif
  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ movl(off, flags);
  __ andl(off, ConstantPoolCacheEntry::field_index_mask);
  const Address field(obj, off, Address::times_1, 0*wordSize);

  // What sort of thing are we loading?
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpl(flags, itos);
      __ jcc(Assembler::notEqual, notInt);
      // itos
      __ movl(rax, field);
      __ push(itos);
      __ jmp(Done);

      __ bind(notInt);
      __ cmpl(flags, ftos);
      __ jcc(Assembler::notEqual, notFloat);
      // ftos
      __ load_float(field);
      __ push(ftos);
      __ jmp(Done);

      __ bind(notFloat);
      __ cmpl(flags, stos);
      __ jcc(Assembler::notEqual, notShort);
      // stos
      __ load_signed_short(rax, field);
      __ push(stos);
      __ jmp(Done);

      __ bind(notShort);
      __ cmpl(flags, btos);
      __ jcc(Assembler::notEqual, notByte);
      // btos
      __ load_signed_byte(rax, field);
      __ push(btos);
      __ jmp(Done);

      __ bind(notByte);
      __ cmpl(flags, ctos);
      __ jcc(Assembler::notEqual, notChar);
      // ctos
      __ load_unsigned_short(rax, field);
      __ push(ctos);
      __ jmp(Done);

      __ bind(notChar);
      __ cmpl(flags, ztos);
      __ jcc(Assembler::notEqual, notBool);
      // ztos
      __ load_signed_byte(rax, field);
      __ push(ztos);
      __ jmp(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpl(flags, ltos);
      __ jcc(Assembler::notEqual, notLong);
      // ltos
      // Loading high word first because movptr clobbers rax
      NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
      __ movptr(rax, field);
      __ push(ltos);
      __ jmp(Done);

      __ bind(notLong);
      __ cmpl(flags, dtos);
      __ jcc(Assembler::notEqual, notDouble);
      // dtos
      __ load_double(field);
      __ push(dtos);
      __ jmp(Done);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
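
// Worked example (assuming Interpreter::stackElementSize == wordSize here):
// locals grow toward lower addresses, so local slot n lives at
// rlocals - n*wordSize. For `iload 3` the index byte is 3; negptr turns it
// into -3, and iaddress(rbx) = Address(rlocals, rbx, times_ptr) computes
// rlocals + (-3)*wordSize -- the same slot that the constant form
// iaddress(3) = Address(rlocals, Interpreter::local_offset_in_bytes(3)) names.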

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to fast_iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);

    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  Label skip;
  __ jccb(Assembler::below, skip);
  // Pass array to create more detailed exceptions.
  __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
  __ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
  __ bind(skip);
}
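
// Note on the bounds check above: a single unsigned comparison covers both
// failure modes. After cmpl(index, length), Assembler::below is an unsigned
// "index < length" test, so a negative index (e.g. -1 == 0xFFFFFFFF) compares
// as a huge unsigned value, fails the test, and throws as well.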

void TemplateTable::iaload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_INT)),
                    noreg, noreg);
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  NOT_LP64(__ mov(rbx, rax));
  // rbx: index
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
                    Address(rdx, rbx, Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_LONG)),
                    noreg, noreg);
}



void TemplateTable::faload() {
  transition(itos, ftos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
                    Address(rdx, rax,
                            Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                    noreg, noreg);
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
                    Address(rdx, rax,
                            Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                    noreg, noreg);
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  do_oop_load(_masm,
              Address(rdx, rax,
                      UseCompressedOops ? Address::times_4 : Address::times_ptr,
                      arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
              rax,
              IS_ARRAY);
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                    noreg, noreg);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
                    noreg, noreg);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ load_float(faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ load_double(daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ store_float(faddress(rbx));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ store_double(daddress(rbx));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  NOT_LP64(__ pop_l(rax, rdx));
  LP64_ONLY(__ pop_l());
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}

void TemplateTable::wide_fstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_f(xmm0);
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
#else
  wide_istore();
#endif
}

void TemplateTable::wide_dstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_d(xmm0);
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
#else
  wide_lstore();
#endif
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_INT)),
                     rax, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx); // prefer index in rbx
  // rbx: index
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
                     Address(rcx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
                     noreg /* ltos */, noreg, noreg);
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 1 ? xmm0 : ST(0)
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                     noreg /* ftos */, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 2 ? xmm0 : ST(0)
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                     noreg /* dtos */, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_ptr,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check_without_pop(rdx, rcx); // kills rbx
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));

  // Generate subtype check. Blows rcx, rdi
  // Superklass in rax. Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  __ movl(rcx, at_tos_p1()); // index
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, rax, IS_ARRAY);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, rcx=index. Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(rcx, rdx);
  __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ testl(rcx, diffbit);
  Label L_skip;
  __ jccb(Assembler::zero, L_skip);
  __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_1,
                             arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                     rax, noreg, noreg);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_2,
                             arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                     rax, noreg, noreg);
}


void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ store_float(faddress(n));
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ store_double(daddress(n));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  : __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  : __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  : __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
#ifdef _LP64
  switch (op) {
  case add  : __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  : __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
#else
  __ pop_l(rbx, rcx);
  switch (op) {
  case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
  case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
              __ mov (rax, rbx); __ mov (rdx, rcx); break;
  case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
  case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
  case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
  default   : ShouldNotReachHere();
  }
#endif
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}
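
// Why the "corrected" divide: x86 idiv raises #DE on the one overflowing
// case, min_int / -1, while the JVM defines that quotient as min_int.
// A minimal sketch of the required semantics in plain C++ (illustrative
// only, not the generated code):
//
//   static inline jint java_idiv(jint x, jint y) {
//     if (x == min_jint && y == -1) return x;  // overflow case must not trap
//     return x / y;                            // hardware divide otherwise
//   }
//
// corrected_idivl(rcx) emits the equivalent special-case test around the
// hardware idiv and leaves the remainder in rdx, which irem below relies on.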

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
#ifdef _LP64
  __ pop_l(rdx);
  __ imulq(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
#endif
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);        // get shift count
#ifdef _LP64
  __ pop_l(rax);            // get shift value
  __ shlq(rax);
#else
  __ pop_l(rax, rdx);       // get shift value
  __ lshl(rdx, rax);
#endif
}

void TemplateTable::lshr() {
#ifdef _LP64
  transition(itos, ltos);
  __ movl(rcx, rax);        // get shift count
  __ pop_l(rax);            // get shift value
  __ sarq(rax);
#else
  transition(itos, ltos);
  __ mov(rcx, rax);         // get shift count
  __ pop_l(rax, rdx);       // get shift value
  __ lshr(rdx, rax, true);
#endif
}

void TemplateTable::lushr() {
  transition(itos, ltos);
#ifdef _LP64
  __ movl(rcx, rax);        // get shift count
  __ pop_l(rax);            // get shift value
  __ shrq(rax);
#else
  __ mov(rcx, rax);         // get shift count
  __ pop_l(rax, rdx);       // get shift value
  __ lshr(rdx, rax);
#endif
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  if (UseSSE >= 1) {
    switch (op) {
    case add:
      __ addss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case sub:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ subss(xmm0, xmm1);
      break;
    case mul:
      __ mulss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case div:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ divss(xmm0, xmm1);
      break;
    case rem:
      // On x86_64 platforms the SharedRuntime::frem method is called to perform the
      // modulo operation. The frem method calls the function
      // double fmod(double x, double y) in math.h. The documentation of fmod states:
      // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
      // (signalling or quiet) is returned.
      //
      // On x86_32 platforms the FPU is used to perform the modulo operation. The
      // reason is that on 32-bit Windows the sign of modulo operations diverges from
      // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f and not -0.0f).
      // The fprem instruction used on x86_32 is functionally equivalent to
      // SharedRuntime::frem in that it returns a NaN.
#ifdef _LP64
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
#else
      __ push_f(xmm0);
      __ pop_f();
      __ fld_s(at_rsp());
      __ fremr(rax);
      __ f2ieee();
      __ pop(rax);  // pop second operand off the stack
      __ push_f();
      __ pop_f(xmm0);
#endif
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
    case add: __ fadd_s (at_rsp());                break;
    case sub: __ fsubr_s(at_rsp());                break;
    case mul: __ fmul_s (at_rsp());                break;
    case div: __ fdivr_s(at_rsp());                break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ f2ieee();
    __ pop(rax);  // pop second operand off the stack
#endif // _LP64
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  if (UseSSE >= 2) {
    switch (op) {
    case add:
      __ addsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case sub:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ subsd(xmm0, xmm1);
      break;
    case mul:
      __ mulsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case div:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ divsd(xmm0, xmm1);
      break;
    case rem:
      // Similar to fop2(), the modulo operation is performed using the
      // SharedRuntime::drem method (on x86_64 platforms) or using the
      // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
#ifdef _LP64
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
#else
      __ push_d(xmm0);
      __ pop_d();
      __ fld_d(at_rsp());
      __ fremr(rax);
      __ d2ieee();
      __ pop(rax);
      __ pop(rdx);
      __ push_d();
      __ pop_d(xmm0);
#endif
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
    case add: __ fadd_d (at_rsp()); break;
    case sub: __ fsubr_d(at_rsp()); break;
    case mul: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fmul_d (at_rsp());
      __ jmpb(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case div: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fdivr_d(at_rsp());
      __ jmp(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ d2ieee();
    // Pop double precision number from rsp.
    __ pop(rax);
    __ pop(rdx);
#endif
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  LP64_ONLY(__ negq(rax));
  NOT_LP64(__ lneg(rdx, rax));
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
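
// Worked example (assuming the static pools below are at least 8-byte
// aligned): &pool[1] is 8 bytes past &pool[0], so rounding it down with
// ~0xF lands on either &pool[0] or &pool[1], i.e. on a 16-byte aligned
// address with 16 bytes of room left inside the 32-byte pool, and the two
// 8-byte halves of the mask are stored there.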

// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

void TemplateTable::fneg() {
  transition(ftos, ftos);
  if (UseSSE >= 1) {
    static jlong *float_signflip = double_quadword(&float_signflip_pool[1], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
    __ xorps(xmm0, ExternalAddress((address) float_signflip));
  } else {
    LP64_ONLY(ShouldNotReachHere());
    NOT_LP64(__ fchs());
  }
}
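
// Worked example: the mask built above is 0x80000000 replicated across the
// four 32-bit lanes, and xorps flips only the sign bit of xmm0, e.g.
// 1.0f (0x3F800000) XOR 0x80000000 = 0xBF800000 = -1.0f. This negates without
// raising exceptions and leaves NaNs as NaNs, which is what fneg requires.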

void TemplateTable::dneg() {
  transition(dtos, dtos);
  if (UseSSE >= 2) {
    static jlong *double_signflip =
      double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
    __ xorpd(xmm0, ExternalAddress((address) double_signflip));
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    __ fchs();
#endif
  }
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
#ifdef _LP64
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
    {
      Label L;
      __ cvttss2sil(rax, xmm0);
      __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
      __ jcc(Assembler::notEqual, L);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      __ bind(L);
    }
    break;
  case Bytecodes::_f2l:
    {
      Label L;
      __ cvttss2siq(rax, xmm0);
      // NaN or overflow/underflow?
      __ cmp64(rax, ExternalAddress((address) &is_nan));
      __ jcc(Assembler::notEqual, L);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      __ bind(L);
    }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
    {
      Label L;
      __ cvttsd2sil(rax, xmm0);
      __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
      __ jcc(Assembler::notEqual, L);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
      __ bind(L);
    }
    break;
  case Bytecodes::_d2l:
    {
      Label L;
      __ cvttsd2siq(rax, xmm0);
      // NaN or overflow/underflow?
      __ cmp64(rax, ExternalAddress((address) &is_nan));
      __ jcc(Assembler::notEqual, L);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
      __ bind(L);
    }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
1882 #else
1883 // Checking
1884 #ifdef ASSERT
1885 { TosState tos_in = ilgl;
1886 TosState tos_out = ilgl;
1887 switch (bytecode()) {
1888 case Bytecodes::_i2l: // fall through
1889 case Bytecodes::_i2f: // fall through
1890 case Bytecodes::_i2d: // fall through
1891 case Bytecodes::_i2b: // fall through
1892 case Bytecodes::_i2c: // fall through
1893 case Bytecodes::_i2s: tos_in = itos; break;
1894 case Bytecodes::_l2i: // fall through
1895 case Bytecodes::_l2f: // fall through
1896 case Bytecodes::_l2d: tos_in = ltos; break;
1897 case Bytecodes::_f2i: // fall through
1898 case Bytecodes::_f2l: // fall through
1899 case Bytecodes::_f2d: tos_in = ftos; break;
1900 case Bytecodes::_d2i: // fall through
1901 case Bytecodes::_d2l: // fall through
1902 case Bytecodes::_d2f: tos_in = dtos; break;
1903 default : ShouldNotReachHere();
1904 }
1905 switch (bytecode()) {
1906 case Bytecodes::_l2i: // fall through
1907 case Bytecodes::_f2i: // fall through
1908 case Bytecodes::_d2i: // fall through
1909 case Bytecodes::_i2b: // fall through
1910 case Bytecodes::_i2c: // fall through
1911 case Bytecodes::_i2s: tos_out = itos; break;
1912 case Bytecodes::_i2l: // fall through
1913 case Bytecodes::_f2l: // fall through
1914 case Bytecodes::_d2l: tos_out = ltos; break;
1915 case Bytecodes::_i2f: // fall through
1916 case Bytecodes::_l2f: // fall through
1917 case Bytecodes::_d2f: tos_out = ftos; break;
1918 case Bytecodes::_i2d: // fall through
1919 case Bytecodes::_l2d: // fall through
1920 case Bytecodes::_f2d: tos_out = dtos; break;
1921 default : ShouldNotReachHere();
1922 }
1923 transition(tos_in, tos_out);
1924 }
1925 #endif // ASSERT
1926
1927 // Conversion
1928 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1929 switch (bytecode()) {
1930 case Bytecodes::_i2l:
1931 __ extend_sign(rdx, rax);
1932 break;
1933 case Bytecodes::_i2f:
1934 if (UseSSE >= 1) {
1935 __ cvtsi2ssl(xmm0, rax);
1936 } else {
1937 __ push(rax); // store int on tos
1938 __ fild_s(at_rsp()); // load int to ST0
1939 __ f2ieee(); // truncate to float size
1940 __ pop(rcx); // adjust rsp
1941 }
1942 break;
1943 case Bytecodes::_i2d:
1944 if (UseSSE >= 2) {
1945 __ cvtsi2sdl(xmm0, rax);
1946 } else {
1947 __ push(rax); // add one slot for d2ieee()
1948 __ push(rax); // store int on tos
1949 __ fild_s(at_rsp()); // load int to ST0
1950 __ d2ieee(); // truncate to double size
1951 __ pop(rcx); // adjust rsp
1952 __ pop(rcx);
1953 }
1954 break;
1955 case Bytecodes::_i2b:
1956 __ shll(rax, 24); // truncate upper 24 bits
1957 __ sarl(rax, 24); // and sign-extend byte
1958 LP64_ONLY(__ movsbl(rax, rax));
1959 break;
1960 case Bytecodes::_i2c:
1961 __ andl(rax, 0xFFFF); // truncate upper 16 bits
1962 LP64_ONLY(__ movzwl(rax, rax));
1963 break;
1964 case Bytecodes::_i2s:
1965 __ shll(rax, 16); // truncate upper 16 bits
1966 __ sarl(rax, 16); // and sign-extend short
1967 LP64_ONLY(__ movswl(rax, rax));
1968 break;
1969 case Bytecodes::_l2i:
1970 /* nothing to do */
1971 break;
1972 case Bytecodes::_l2f:
1973 // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
1974 // 64-bit long values to floats. On 32-bit platforms it is not possible
1975 // to use that instruction with 64-bit operands, therefore the FPU is
1976 // used to perform the conversion.
1977 __ push(rdx); // store long on tos
1978 __ push(rax);
1979 __ fild_d(at_rsp()); // load long to ST0
1980 __ f2ieee(); // truncate to float size
1981 __ pop(rcx); // adjust rsp
1982 __ pop(rcx);
1983 if (UseSSE >= 1) {
1984 __ push_f();
1985 __ pop_f(xmm0);
1986 }
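// The push_f()/pop_f(xmm0) pair above moves the FPU result into xmm0 via
// the stack, since with UseSSE >= 1 the interpreter expects the float tos
// value in xmm0.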
1987 break;
1988 case Bytecodes::_l2d:
1989 // On 32-bit platforms the FPU is used for conversion because on
1990 // 32-bit platforms it is not possible to use the cvtsi2sdq
1991 // instruction with 64-bit operands.
1992 __ push(rdx); // store long on tos
1993 __ push(rax);
1994 __ fild_d(at_rsp()); // load long to ST0
1995 __ d2ieee(); // truncate to double size
1996 __ pop(rcx); // adjust rsp
1997 __ pop(rcx);
1998 if (UseSSE >= 2) {
1999 __ push_d();
2000 __ pop_d(xmm0);
2001 }
2002 break;
2003 case Bytecodes::_f2i:
2004 // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
2005 // as it returns 0 for any NaN.
2006 if (UseSSE >= 1) {
2007 __ push_f(xmm0);
2008 } else {
2009 __ push(rcx); // reserve space for argument
2010 __ fstp_s(at_rsp()); // pass float argument on stack
2011 }
2012 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
2013 break;
2014 case Bytecodes::_f2l:
2015 // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
2016 // as it returns 0 for any NaN.
2017 if (UseSSE >= 1) {
2018 __ push_f(xmm0);
2019 } else {
2020 __ push(rcx); // reserve space for argument
2021 __ fstp_s(at_rsp()); // pass float argument on stack
2022 }
2023 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2024 break;
2025 case Bytecodes::_f2d:
2026 if (UseSSE < 1) {
2027 /* nothing to do */
2028 } else if (UseSSE == 1) {
2029 __ push_f(xmm0);
2030 __ pop_f();
2031 } else { // UseSSE >= 2
2032 __ cvtss2sd(xmm0, xmm0);
2033 }
2034 break;
2035 case Bytecodes::_d2i:
2036 if (UseSSE >= 2) {
2037 __ push_d(xmm0);
2038 } else {
2039 __ push(rcx); // reserve space for argument
2040 __ push(rcx);
2041 __ fstp_d(at_rsp()); // pass double argument on stack
2042 }
2043 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
2044 break;
2045 case Bytecodes::_d2l:
2046 if (UseSSE >= 2) {
2047 __ push_d(xmm0);
2048 } else {
2049 __ push(rcx); // reserve space for argument
2050 __ push(rcx);
2051 __ fstp_d(at_rsp()); // pass double argument on stack
2052 }
2053 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2054 break;
2055 case Bytecodes::_d2f:
2056 if (UseSSE <= 1) {
2057 __ push(rcx); // reserve space for f2ieee()
2058 __ f2ieee(); // truncate to float size
2059 __ pop(rcx); // adjust rsp
2060 if (UseSSE == 1) {
2061 // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2062 // the conversion is performed using the FPU in this case.
2063 __ push_f();
2064 __ pop_f(xmm0);
2065 }
2066 } else { // UseSSE >= 2
2067 __ cvtsd2ss(xmm0, xmm0);
2068 }
2069 break;
2070 default :
2071 ShouldNotReachHere();
2072 }
2073 #endif
2074 }
2075
2076 void TemplateTable::lcmp() {
2077 transition(ltos, itos);
2078 #ifdef _LP64
2079 Label done;
2080 __ pop_l(rdx);
2081 __ cmpq(rdx, rax);
2082 __ movl(rax, -1);
2083 __ jccb(Assembler::less, done);
2084 __ setb(Assembler::notEqual, rax);
2085 __ movzbl(rax, rax);
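// On this path rax is 0 (equal) or 1 (greater); the "less" case jumped to
// done with the preloaded -1, so rax ends up as -1, 0, or 1.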
2086 __ bind(done);
2087 #else
2088
2089 // y = rdx:rax
2090 __ pop_l(rbx, rcx); // get x = rcx:rbx
2091 __ lcmp2int(rcx, rbx, rdx, rax); // rcx := cmp(x, y)
2092 __ mov(rax, rcx);
2093 #endif
2094 }
2095
2096 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2097 if ((is_float && UseSSE >= 1) ||
2098 (!is_float && UseSSE >= 2)) {
2099 Label done;
2100 if (is_float) {
2101 // XXX get rid of pop here, use ... reg, mem32
2102 __ pop_f(xmm1);
2103 __ ucomiss(xmm1, xmm0);
2104 } else {
2105 // XXX get rid of pop here, use ... reg, mem64
2106 __ pop_d(xmm1);
2107 __ ucomisd(xmm1, xmm0);
2108 }
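// ucomiss/ucomisd set the parity flag on an unordered (NaN) comparison, so
// the parity branches below select the bytecode's mandated NaN result:
// -1 for fcmpl/dcmpl, +1 for fcmpg/dcmpg.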
2109 if (unordered_result < 0) {
2110 __ movl(rax, -1);
2111 __ jccb(Assembler::parity, done);
2112 __ jccb(Assembler::below, done);
2113 __ setb(Assembler::notEqual, rdx);
2114 __ movzbl(rax, rdx);
2115 } else {
2116 __ movl(rax, 1);
2117 __ jccb(Assembler::parity, done);
2118 __ jccb(Assembler::above, done);
2119 __ movl(rax, 0);
2120 __ jccb(Assembler::equal, done);
2121 __ decrementl(rax);
2122 }
2123 __ bind(done);
2124 } else {
2125 #ifdef _LP64
2126 ShouldNotReachHere();
2127 #else
2128 if (is_float) {
2129 __ fld_s(at_rsp());
2130 } else {
2131 __ fld_d(at_rsp());
2132 __ pop(rdx);
2133 }
2134 __ pop(rcx);
2135 __ fcmp2int(rax, unordered_result < 0);
2136 #endif // _LP64
2137 }
2138 }
2139
2140 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2141 __ get_method(rcx); // rcx holds method
2142 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2143 // holds bumped taken count
2144
2145 const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2146 InvocationCounter::counter_offset();
2147 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2148 InvocationCounter::counter_offset();
2149
2150 // Load up edx with the branch displacement
2151 if (is_wide) {
2152 __ movl(rdx, at_bcp(1));
2153 } else {
2154 __ load_signed_short(rdx, at_bcp(1));
2155 }
2156 __ bswapl(rdx);
2157
2158 if (!is_wide) {
2159 __ sarl(rdx, 16);
2160 }
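// For the narrow case the 16-bit displacement was byte-swapped as part of a
// 32-bit value, leaving it in the upper half of rdx; the arithmetic shift
// brings it back down with sign extension.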
2161 LP64_ONLY(__ movl2ptr(rdx, rdx));
2162
2163 // Handle all the JSR stuff here, then exit.
2164 // It's much shorter and cleaner than intermingling with the non-JSR
2165 // normal-branch stuff occurring below.
2166 if (is_jsr) {
2167 // Pre-load the next target bytecode into rbx
2168 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2169
2170 // compute return address as bci in rax
2171 __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2172 in_bytes(ConstMethod::codes_offset())));
2173 __ subptr(rax, Address(rcx, Method::const_offset()));
2174 // Adjust the bcp in r13 by the displacement in rdx
2175 __ addptr(rbcp, rdx);
2176 // jsr's result is atos, but the value is a bci (not an oop), hence the push_i below
2177 __ push_i(rax);
2178 __ dispatch_only(vtos, true);
2179 return;
2180 }
2181
2182 // Normal (non-jsr) branch handling
2183
2184 // Adjust the bcp in r13 by the displacement in rdx
2185 __ addptr(rbcp, rdx);
2186
2187 assert(UseLoopCounter || !UseOnStackReplacement,
2188 "on-stack-replacement requires loop counters");
2189 Label backedge_counter_overflow;
2190 Label profile_method;
2191 Label dispatch;
2192 if (UseLoopCounter) {
2193 // increment backedge counter for backward branches
2194 // rax: MDO
2195 // rbx: MDO bumped taken-count
2196 // rcx: method
2197 // rdx: target offset
2198 // r13: target bcp
2199 // r14: locals pointer
2200 __ testl(rdx, rdx); // check if forward or backward branch
2201 __ jcc(Assembler::positive, dispatch); // count only if backward branch
2202
2203 // check if MethodCounters exists
2204 Label has_counters;
2205 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2206 __ testptr(rax, rax);
2207 __ jcc(Assembler::notZero, has_counters);
2208 __ push(rdx);
2209 __ push(rcx);
2210 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2211 rcx);
2212 __ pop(rcx);
2213 __ pop(rdx);
2214 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2215 __ testptr(rax, rax);
2216 __ jcc(Assembler::zero, dispatch);
2217 __ bind(has_counters);
2218
2219 if (TieredCompilation) {
2220 Label no_mdo;
2221 int increment = InvocationCounter::count_increment;
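// increment_mask_and_jump below bumps a counter, masks it, and branches to
// the overflow stub when the masked value reaches zero; here that stub is
// backedge_counter_overflow, which triggers OSR compilation when
// UseOnStackReplacement is enabled.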
2222 if (ProfileInterpreter) {
2223 // Are we profiling?
2224 __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2225 __ testptr(rbx, rbx);
2226 __ jccb(Assembler::zero, no_mdo);
2227 // Increment the MDO backedge counter
2228 const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2229 in_bytes(InvocationCounter::counter_offset()));
2230 const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2231 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
2232 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2233 __ jmp(dispatch);
2234 }
2235 __ bind(no_mdo);
2236 // Increment backedge counter in MethodCounters*
2237 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2238 const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2239 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
2240 rax, false, Assembler::zero,
2241 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2242 } else { // not TieredCompilation
2243 // increment counter
2244 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2245 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
2246 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
2247 __ movl(Address(rcx, be_offset), rax); // store counter
2248
2249 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
2250
2251 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
2252 __ addl(rax, Address(rcx, be_offset)); // add both counters
2253
2254 if (ProfileInterpreter) {
2255 // Test to see if we should create a method data oop
2256 __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
2257 __ jcc(Assembler::less, dispatch);
2258
2259 // if no method data exists, go to profile method
2260 __ test_method_data_pointer(rax, profile_method);
2261
2262 if (UseOnStackReplacement) {
2263 // check for overflow against rbx which is the MDO taken count
2264 __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2265 __ jcc(Assembler::below, dispatch);
2266
2267 // When ProfileInterpreter is on, the backedge_count comes
2268 // from the MethodData*, whose value does not get reset on
2269 // the call to frequency_counter_overflow(). To avoid
2270 // excessive calls to the overflow routine while the method is
2271 // being compiled, add a second test to make sure the overflow
2272 // function is called only once every overflow_frequency.
2273 const int overflow_frequency = 1024;
2274 __ andl(rbx, overflow_frequency - 1);
2275 __ jcc(Assembler::zero, backedge_counter_overflow);
2276
2277 }
2278 } else {
2279 if (UseOnStackReplacement) {
2280 // check for overflow against rax, which is the sum of the
2281 // counters
2282 __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2283 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
2284
2285 }
2286 }
2287 }
2288 __ bind(dispatch);
2289 }
2290
2291 // Pre-load the next target bytecode into rbx
2292 __ load_unsigned_byte(rbx, Address(rbcp, 0));
2293
2294 // continue with the bytecode @ target
2295 // rax: return bci for jsr's, unused otherwise
2296 // rbx: target bytecode
2297 // r13: target bcp
2298 __ dispatch_only(vtos, true);
2299
2300 if (UseLoopCounter) {
2301 if (ProfileInterpreter) {
2302 // Out-of-line code to allocate method data oop.
2303 __ bind(profile_method);
2304 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2305 __ set_method_data_pointer_for_bcp();
2306 __ jmp(dispatch);
2307 }
2308
2309 if (UseOnStackReplacement) {
2310 // invocation counter overflow
2311 __ bind(backedge_counter_overflow);
2312 __ negptr(rdx);
2313 __ addptr(rdx, rbcp); // branch bcp
2314 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2315 __ call_VM(noreg,
2316 CAST_FROM_FN_PTR(address,
2317 InterpreterRuntime::frequency_counter_overflow),
2318 rdx);
2319
2320 // rax: osr nmethod (osr ok) or NULL (osr not possible)
2321 // rdx: scratch
2322 // r14: locals pointer
2323 // r13: bcp
2324 __ testptr(rax, rax); // test result
2325 __ jcc(Assembler::zero, dispatch); // no osr if null
2326 // nmethod may have been invalidated (VM may block upon call_VM return)
2327 __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2328 __ jcc(Assembler::notEqual, dispatch);
2329
2330 // We have the address of an on stack replacement routine in rax.
2331 // In preparation of invoking it, first we must migrate the locals
2332 // and monitors from off the interpreter frame on the stack.
2333 // Ensure to save the osr nmethod over the migration call,
2334 // it will be preserved in rbx.
2335 __ mov(rbx, rax);
2336
2337 NOT_LP64(__ get_thread(rcx));
2338
2339 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2340
2341 // rax is OSR buffer, move it to expected parameter location
2342 LP64_ONLY(__ mov(j_rarg0, rax));
2343 NOT_LP64(__ mov(rcx, rax));
2344 // We use the j_rarg definitions here only to pick registers that cannot
2345 // collide with the OSR call sequence below, since parameter registers
2346 // differ across platforms. These are NOT parameters.
2347
2348 const Register retaddr = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2349 const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2350
2351 // pop the interpreter frame
2352 __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2353 __ leave(); // remove frame anchor
2354 __ pop(retaddr); // get return address
2355 __ mov(rsp, sender_sp); // set sp to sender sp
2356 // Ensure compiled code always sees stack at proper alignment
2357 __ andptr(rsp, -(StackAlignmentInBytes));
2358
2359 // No specialized return from compiled code back to the interpreter or
2360 // the call stub is needed here.
2361
2362 // push the return address
2363 __ push(retaddr);
2364
2365 // and begin the OSR nmethod
2366 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
2367 }
2368 }
2369 }
2370
2371 void TemplateTable::if_0cmp(Condition cc) {
2372 transition(itos, vtos);
2373 // assume branch is more often taken than not (loops use backward branches)
2374 Label not_taken;
2375 __ testl(rax, rax);
2376 __ jcc(j_not(cc), not_taken);
2377 branch(false, false);
2378 __ bind(not_taken);
2379 __ profile_not_taken_branch(rax);
2380 }
2381
2382 void TemplateTable::if_icmp(Condition cc) {
2383 transition(itos, vtos);
2384 // assume branch is more often taken than not (loops use backward branches)
2385 Label not_taken;
2386 __ pop_i(rdx);
2387 __ cmpl(rdx, rax);
2388 __ jcc(j_not(cc), not_taken);
2389 branch(false, false);
2390 __ bind(not_taken);
2391 __ profile_not_taken_branch(rax);
2392 }
2393
2394 void TemplateTable::if_nullcmp(Condition cc) {
2395 transition(atos, vtos);
2396 // assume branch is more often taken than not (loops use backward branches)
2397 Label not_taken;
2398 __ testptr(rax, rax);
2399 __ jcc(j_not(cc), not_taken);
2400 branch(false, false);
2401 __ bind(not_taken);
2402 __ profile_not_taken_branch(rax);
2403 }
2404
2405 void TemplateTable::if_acmp(Condition cc) {
2406 transition(atos, vtos);
2407 // assume branch is more often taken than not (loops use backward branches)
2408 Label not_taken;
2409 __ pop_ptr(rdx);
2410 __ cmpoop(rdx, rax);
2411 __ jcc(j_not(cc), not_taken);
2412 branch(false, false);
2413 __ bind(not_taken);
2414 __ profile_not_taken_branch(rax);
2415 }
2416
2417 void TemplateTable::ret() {
2418 transition(vtos, vtos);
2419 locals_index(rbx);
2420 LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2421 NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2422 __ profile_ret(rbx, rcx);
2423 __ get_method(rax);
2424 __ movptr(rbcp, Address(rax, Method::const_offset()));
2425 __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2426 ConstMethod::codes_offset()));
2427 __ dispatch_next(vtos, 0, true);
2428 }
2429
2430 void TemplateTable::wide_ret() {
2431 transition(vtos, vtos);
2432 locals_index_wide(rbx);
2433 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2434 __ profile_ret(rbx, rcx);
2435 __ get_method(rax);
2436 __ movptr(rbcp, Address(rax, Method::const_offset()));
2437 __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2438 __ dispatch_next(vtos, 0, true);
2439 }
2440
2441 void TemplateTable::tableswitch() {
2442 Label default_case, continue_execution;
2443 transition(itos, vtos);
2444
2445 // align r13/rsi
2446 __ lea(rbx, at_bcp(BytesPerInt));
2447 __ andptr(rbx, -BytesPerInt);
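// Per the JVM spec, the tableswitch operands are padded so that the default
// offset is 4-byte aligned relative to the start of the bytecodes; rounding
// the bcp-relative address up to a 4-byte boundary relies on the bytecode
// array itself being suitably aligned in memory.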
2448 // load lo & hi
2449 __ movl(rcx, Address(rbx, BytesPerInt));
2450 __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2451 __ bswapl(rcx);
2452 __ bswapl(rdx);
2453 // check against lo & hi
2454 __ cmpl(rax, rcx);
2455 __ jcc(Assembler::less, default_case);
2456 __ cmpl(rax, rdx);
2457 __ jcc(Assembler::greater, default_case);
2458 // lookup dispatch offset
2459 __ subl(rax, rcx);
2460 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2461 __ profile_switch_case(rax, rbx, rcx);
2462 // continue execution
2463 __ bind(continue_execution);
2464 __ bswapl(rdx);
2465 LP64_ONLY(__ movl2ptr(rdx, rdx));
2466 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2467 __ addptr(rbcp, rdx);
2468 __ dispatch_only(vtos, true);
2469 // handle default
2470 __ bind(default_case);
2471 __ profile_switch_default(rax);
2472 __ movl(rdx, Address(rbx, 0));
2473 __ jmp(continue_execution);
2474 }
2475
2476 void TemplateTable::lookupswitch() {
2477 transition(itos, itos);
2478 __ stop("lookupswitch bytecode should have been rewritten");
2479 }
2480
2481 void TemplateTable::fast_linearswitch() {
2482 transition(itos, vtos);
2483 Label loop_entry, loop, found, continue_execution;
2484 // bswap rax so we can avoid bswapping the table entries
2485 __ bswapl(rax);
2486 // align r13
2487 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2488 // this instruction (change offsets
2489 // below)
2490 __ andptr(rbx, -BytesPerInt);
2491 // set counter
2492 __ movl(rcx, Address(rbx, BytesPerInt));
2493 __ bswapl(rcx);
2494 __ jmpb(loop_entry);
2495 // table search
2496 __ bind(loop);
2497 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2498 __ jcc(Assembler::equal, found);
2499 __ bind(loop_entry);
2500 __ decrementl(rcx);
2501 __ jcc(Assembler::greaterEqual, loop);
2502 // default case
2503 __ profile_switch_default(rax);
2504 __ movl(rdx, Address(rbx, 0));
2505 __ jmp(continue_execution);
2506 // entry found -> get offset
2507 __ bind(found);
2508 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2509 __ profile_switch_case(rcx, rax, rbx);
2510 // continue execution
2511 __ bind(continue_execution);
2512 __ bswapl(rdx);
2513 __ movl2ptr(rdx, rdx);
2514 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2515 __ addptr(rbcp, rdx);
2516 __ dispatch_only(vtos, true);
2517 }
2518
2519 void TemplateTable::fast_binaryswitch() {
2520 transition(itos, vtos);
2521 // Implementation using the following core algorithm:
2522 //
2523 // int binary_search(int key, LookupswitchPair* array, int n) {
2524 // // Binary search according to "Methodik des Programmierens" by
2525 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2526 // int i = 0;
2527 // int j = n;
2528 // while (i+1 < j) {
2529 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2530 // // with Q: for all i: 0 <= i < n: key < a[i]
2531 // // where a stands for the array and assuming that the (nonexistent)
2532 // // element a[n] is infinitely big.
2533 // int h = (i + j) >> 1;
2534 // // i < h < j
2535 // if (key < array[h].fast_match()) {
2536 // j = h;
2537 // } else {
2538 // i = h;
2539 // }
2540 // }
2541 // // R: a[i] <= key < a[i+1] or Q
2542 // // (i.e., if key is within array, i is the correct index)
2543 // return i;
2544 // }
2545
2546 // Register allocation
2547 const Register key = rax; // already set (tosca)
2548 const Register array = rbx;
2549 const Register i = rcx;
2550 const Register j = rdx;
2551 const Register h = rdi;
2552 const Register temp = rsi;
2553
2554 // Find array start
2555 NOT_LP64(__ save_bcp());
2556
2557 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2558 // get rid of this
2559 // instruction (change
2560 // offsets below)
2561 __ andptr(array, -BytesPerInt);
2562
2563 // Initialize i & j
2564 __ xorl(i, i); // i = 0;
2565 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2566
2567 // Convert j into native byteordering
2568 __ bswapl(j);
2569
2570 // And start
2571 Label entry;
2572 __ jmp(entry);
2573
2574 // binary search loop
2575 {
2576 Label loop;
2577 __ bind(loop);
2578 // int h = (i + j) >> 1;
2579 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2580 __ sarl(h, 1); // h = (i + j) >> 1;
2581 // if (key < array[h].fast_match()) {
2582 // j = h;
2583 // } else {
2584 // i = h;
2585 // }
2586 // Convert array[h].match to native byte-ordering before compare
2587 __ movl(temp, Address(array, h, Address::times_8));
2588 __ bswapl(temp);
2589 __ cmpl(key, temp);
2590 // j = h if (key < array[h].fast_match())
2591 __ cmov32(Assembler::less, j, h);
2592 // i = h if (key >= array[h].fast_match())
2593 __ cmov32(Assembler::greaterEqual, i, h);
2594 // while (i+1 < j)
2595 __ bind(entry);
2596 __ leal(h, Address(i, 1)); // i+1
2597 __ cmpl(h, j); // i+1 < j
2598 __ jcc(Assembler::less, loop);
2599 }
2600
2601 // end of binary search, result index is i (must check again!)
2602 Label default_case;
2603 // Convert array[i].match to native byte-ordering before compare
2604 __ movl(temp, Address(array, i, Address::times_8));
2605 __ bswapl(temp);
2606 __ cmpl(key, temp);
2607 __ jcc(Assembler::notEqual, default_case);
2608
2609 // entry found -> j = offset
2610 __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
2611 __ profile_switch_case(i, key, array);
2612 __ bswapl(j);
2613 LP64_ONLY(__ movslq(j, j));
2614
2615 NOT_LP64(__ restore_bcp());
2616 NOT_LP64(__ restore_locals()); // restore rdi
2617
2618 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2619 __ addptr(rbcp, j);
2620 __ dispatch_only(vtos, true);
2621
2622 // default case -> j = default offset
2623 __ bind(default_case);
2624 __ profile_switch_default(i);
2625 __ movl(j, Address(array, -2 * BytesPerInt));
2626 __ bswapl(j);
2627 LP64_ONLY(__ movslq(j, j));
2628
2629 NOT_LP64(__ restore_bcp());
2630 NOT_LP64(__ restore_locals());
2631
2632 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2633 __ addptr(rbcp, j);
2634 __ dispatch_only(vtos, true);
2635 }
2636
2637 void TemplateTable::_return(TosState state) {
2638 transition(state, state);
2639
2640 assert(_desc->calls_vm(),
2641 "inconsistent calls_vm information"); // call in remove_activation
2642
2643 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2644 assert(state == vtos, "only valid state");
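// _return_register_finalizer is the rewritten return of Object.<init>: the
// receiver in local 0 is checked for the HAS_FINALIZER access flag and, if
// it is set, registered with the VM before the activation is removed.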
2645 Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2646 __ movptr(robj, aaddress(0));
2647 __ load_klass(rdi, robj);
2648 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2649 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2650 Label skip_register_finalizer;
2651 __ jcc(Assembler::zero, skip_register_finalizer);
2652
2653 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2654
2655 __ bind(skip_register_finalizer);
2656 }
2657
2658 if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
2659 Label no_safepoint;
2660 NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2661 #ifdef _LP64
2662 __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2663 #else
2664 const Register thread = rdi;
2665 __ get_thread(thread);
2666 __ testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2667 #endif
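// Test the poll bit of the thread-local polling word; when it is set the
// safepoint is armed, so call into the VM and let this thread stop at the
// safepoint before returning.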
2668 __ jcc(Assembler::zero, no_safepoint);
2669 __ push(state);
2670 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2671 InterpreterRuntime::at_safepoint));
2672 __ pop(state);
2673 __ bind(no_safepoint);
2674 }
2675
2676 // Narrow result if state is itos but result type is smaller.
2677 // Need to narrow in the return bytecode rather than in generate_return_entry
2678 // since compiled code callers expect the result to already be narrowed.
2679 if (state == itos) {
2680 __ narrow(rax);
2681 }
2682 __ remove_activation(state, rbcp);
2683
2684 __ jmp(rbcp);
2685 }
2686
2687 // ----------------------------------------------------------------------------
2688 // Volatile variables demand their effects be made known to all CPUs
2689 // in order. Store buffers on most chips allow reads & writes to
2690 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2691 // without some kind of memory barrier (i.e., it's not sufficient that
2692 // the interpreter does not reorder volatile references, the hardware
2693 // also must not reorder them).
2694 //
2695 // According to the new Java Memory Model (JMM):
2696 // (1) All volatiles are serialized with respect to each other. ALSO reads &
2697 // writes act as acquire & release, so:
2698 // (2) A read cannot let unrelated NON-volatile memory refs that
2699 // happen after the read float up to before the read. It's OK for
2700 // non-volatile memory refs that happen before the volatile read to
2701 // float down below it.
2702 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2703 // memory refs that happen BEFORE the write float down to after the
2704 // write. It's OK for non-volatile memory refs that happen after the
2705 // volatile write to float up before it.
2706 //
2707 // We only put in barriers around volatile refs (they are expensive),
2708 // not _between_ memory refs (that would require us to track the
2709 // flavor of the previous memory refs). Requirements (2) and (3)
2710 // require some barriers before volatile stores and after volatile
2711 // loads. These nearly cover requirement (1) but miss the
2712 // volatile-store-volatile-load case. This final case is placed after
2713 // volatile-stores although it could just as well go before
2714 // volatile-loads.
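//
// A rough sketch (not generated code) of the resulting ordering for a
// hypothetical volatile field v:
//
//   ...store value to v...
//   membar(StoreLoad | StoreStore)  // emitted after volatile stores, see
//                                   // putfield_or_static() below
//   ...
//   ...load value from v...         // no explicit barrier on x86: the TSO
//                                   // memory model already orders a load
//                                   // with subsequent loads and stores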
2715
2716 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2717 // Helper function to emit the memory barrier needed for volatile accesses
2718 __ membar(order_constraint);
2719 }
2720
2721 void TemplateTable::resolve_cache_and_index(int byte_no,
2722 Register Rcache,
2723 Register index,
2724 size_t index_size) {
2725 const Register temp = rbx;
2726 assert_different_registers(Rcache, index, temp);
2727
2728 Label resolved;
2729
2730 Bytecodes::Code code = bytecode();
2731 switch (code) {
2732 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2733 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2734 default: break;
2735 }
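// The _nofast variants share the resolution state of their normal
// counterparts, so they are mapped back before checking whether the
// constant pool cache entry has already been resolved.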
2736
2737 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2738 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2739 __ cmpl(temp, code); // have we resolved this bytecode?
2740 __ jcc(Assembler::equal, resolved);
2741
2742 // resolve first time through
2743 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2744 __ movl(temp, code);
2745 __ call_VM(noreg, entry, temp);
2746 // Update registers with resolved info
2747 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2748 __ bind(resolved);
2749 }
2750
2751 // The cache and index registers must be set before the call
2752 void TemplateTable::load_field_cp_cache_entry(Register obj,
2753 Register cache,
2754 Register index,
2755 Register off,
2756 Register flags,
2757 bool is_static = false) {
2758 assert_different_registers(cache, index, flags, off);
2759
2760 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2761 // Field offset
2762 __ movptr(off, Address(cache, index, Address::times_ptr,
2763 in_bytes(cp_base_offset +
2764 ConstantPoolCacheEntry::f2_offset())));
2765 // Flags
2766 __ movl(flags, Address(cache, index, Address::times_ptr,
2767 in_bytes(cp_base_offset +
2768 ConstantPoolCacheEntry::flags_offset())));
2769
2770 // klass overwrite register
2771 if (is_static) {
2772 __ movptr(obj, Address(cache, index, Address::times_ptr,
2773 in_bytes(cp_base_offset +
2774 ConstantPoolCacheEntry::f1_offset())));
2775 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2776 __ movptr(obj, Address(obj, mirror_offset));
2777 __ resolve_oop_handle(obj);
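// The mirror is referenced through an OopHandle so that GC can relocate it;
// resolve_oop_handle performs the extra indirection to obtain the actual
// java.lang.Class oop.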
2778 }
2779 }
2780
2781 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2782 Register method,
2783 Register itable_index,
2784 Register flags,
2785 bool is_invokevirtual,
2786 bool is_invokevfinal, /*unused*/
2787 bool is_invokedynamic) {
2788 // setup registers
2789 const Register cache = rcx;
2790 const Register index = rdx;
2791 assert_different_registers(method, flags);
2792 assert_different_registers(method, cache, index);
2793 assert_different_registers(itable_index, flags);
2794 assert_different_registers(itable_index, cache, index);
2795 // determine constant pool cache field offsets
2796 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2797 const int method_offset = in_bytes(
2798 ConstantPoolCache::base_offset() +
2799 ((byte_no == f2_byte)
2800 ? ConstantPoolCacheEntry::f2_offset()
2801 : ConstantPoolCacheEntry::f1_offset()));
2802 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2803 ConstantPoolCacheEntry::flags_offset());
2804 // access constant pool cache fields
2805 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2806 ConstantPoolCacheEntry::f2_offset());
2807
2808 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
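// invokedynamic encodes a 4-byte (u4) constant pool cache index in the
// bytecode stream; all other invoke bytecodes use a 2-byte (u2) index.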
2809 resolve_cache_and_index(byte_no, cache, index, index_size);
2810 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2811
2812 if (itable_index != noreg) {
2813 // pick up itable or appendix index from f2 also:
2814 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2815 }
2816 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2817 }
2818
2819 // The cache and index registers are expected to be set before the call.
2820 // Correct values of the cache and index registers are preserved.
2821 void TemplateTable::jvmti_post_field_access(Register cache,
2822 Register index,
2823 bool is_static,
2824 bool has_tos) {
2825 if (JvmtiExport::can_post_field_access()) {
2826 // Check to see if a field access watch has been set before we take
2827 // the time to call into the VM.
2828 Label L1;
2829 assert_different_registers(cache, index, rax);
2830 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2831 __ testl(rax,rax);
2832 __ jcc(Assembler::zero, L1);
2833
2834 // cache entry pointer
2835 __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2836 __ shll(index, LogBytesPerWord);
2837 __ addptr(cache, index);
2838 if (is_static) {
2839 __ xorptr(rax, rax); // NULL object reference
2840 } else {
2841 __ pop(atos); // Get the object
2842 __ verify_oop(rax);
2843 __ push(atos); // Restore stack state
2844 }
2845 // rax: object pointer or NULL
2846 // cache: cache entry pointer
2847 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2848 rax, cache);
2849 __ get_cache_and_index_at_bcp(cache, index, 1);
2850 __ bind(L1);
2851 }
2852 }
2853
2854 void TemplateTable::pop_and_check_object(Register r) {
2855 __ pop_ptr(r);
2856 __ null_check(r); // for field access must check obj.
2857 __ verify_oop(r);
2858 }
2859
2860 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2861 transition(vtos, vtos);
2862
2863 const Register cache = rcx;
2864 const Register index = rdx;
2865 const Register obj = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2866 const Register off = rbx;
2867 const Register flags = rax;
2868 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
2869
2870 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2871 jvmti_post_field_access(cache, index, is_static, false);
2872 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2873
2874 if (!is_static) pop_and_check_object(obj);
2875
2876 const Address field(obj, off, Address::times_1, 0*wordSize);
2877
2878 Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
2879
2880 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2881 // Make sure we don't need to mask flags after the above shift
2882 assert(btos == 0, "change code, btos != 0");
2883
2884 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2885
2886 __ jcc(Assembler::notZero, notByte);
2887 // btos
2888 __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
2889 __ push(btos);
2890 // Rewrite bytecode to be faster
2891 if (!is_static && rc == may_rewrite) {
2892 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2893 }
2894 __ jmp(Done);
2895
2896 __ bind(notByte);
2897 __ cmpl(flags, ztos);
2898 __ jcc(Assembler::notEqual, notBool);
2899
2900 // ztos (same code as btos)
2901 __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
2902 __ push(ztos);
2903 // Rewrite bytecode to be faster
2904 if (!is_static && rc == may_rewrite) {
2905 // use btos rewriting; no truncation to the t/f bit is needed for getfield.
2906 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2907 }
2908 __ jmp(Done);
2909
2910 __ bind(notBool);
2911 __ cmpl(flags, atos);
2912 __ jcc(Assembler::notEqual, notObj);
2913 // atos
2914 do_oop_load(_masm, field, rax);
2915 __ push(atos);
2916 if (!is_static && rc == may_rewrite) {
2917 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2918 }
2919 __ jmp(Done);
2920
2921 __ bind(notObj);
2922 __ cmpl(flags, itos);
2923 __ jcc(Assembler::notEqual, notInt);
2924 // itos
2925 __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
2926 __ push(itos);
2927 // Rewrite bytecode to be faster
2928 if (!is_static && rc == may_rewrite) {
2929 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2930 }
2931 __ jmp(Done);
2932
2933 __ bind(notInt);
2934 __ cmpl(flags, ctos);
2935 __ jcc(Assembler::notEqual, notChar);
2936 // ctos
2937 __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
2938 __ push(ctos);
2939 // Rewrite bytecode to be faster
2940 if (!is_static && rc == may_rewrite) {
2941 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2942 }
2943 __ jmp(Done);
2944
2945 __ bind(notChar);
2946 __ cmpl(flags, stos);
2947 __ jcc(Assembler::notEqual, notShort);
2948 // stos
2949 __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
2950 __ push(stos);
2951 // Rewrite bytecode to be faster
2952 if (!is_static && rc == may_rewrite) {
2953 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2954 }
2955 __ jmp(Done);
2956
2957 __ bind(notShort);
2958 __ cmpl(flags, ltos);
2959 __ jcc(Assembler::notEqual, notLong);
2960 // ltos
2961 // Generate code as if volatile (x86_32). There just aren't enough registers to
2962 // save that information and this code is faster than the test.
2963 __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
2964 __ push(ltos);
2965 // Rewrite bytecode to be faster
2966 LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
2967 __ jmp(Done);
2968
2969 __ bind(notLong);
2970 __ cmpl(flags, ftos);
2971 __ jcc(Assembler::notEqual, notFloat);
2972 // ftos
2973
2974 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
2975 __ push(ftos);
2976 // Rewrite bytecode to be faster
2977 if (!is_static && rc == may_rewrite) {
2978 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2979 }
2980 __ jmp(Done);
2981
2982 __ bind(notFloat);
2983 #ifdef ASSERT
2984 Label notDouble;
2985 __ cmpl(flags, dtos);
2986 __ jcc(Assembler::notEqual, notDouble);
2987 #endif
2988 // dtos
2989 // MO_RELAXED: even for a volatile field this adds no extra work for the underlying implementation
2990 __ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg, noreg);
2991 __ push(dtos);
2992 // Rewrite bytecode to be faster
2993 if (!is_static && rc == may_rewrite) {
2994 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2995 }
2996 #ifdef ASSERT
2997 __ jmp(Done);
2998
2999 __ bind(notDouble);
3000 __ stop("Bad state");
3001 #endif
3002
3003 __ bind(Done);
3004 // [jk] not needed currently
3005 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
3006 // Assembler::LoadStore));
3007 }
3008
3009 void TemplateTable::getfield(int byte_no) {
3010 getfield_or_static(byte_no, false);
3011 }
3012
3013 void TemplateTable::nofast_getfield(int byte_no) {
3014 getfield_or_static(byte_no, false, may_not_rewrite);
3015 }
3016
3017 void TemplateTable::getstatic(int byte_no) {
3018 getfield_or_static(byte_no, true);
3019 }
3020
3021
3022 // The cache and index registers are expected to be set before the call.
3023 // The function may destroy various registers, just not the cache and index registers.
3024 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
3025
3026 const Register robj = LP64_ONLY(c_rarg2) NOT_LP64(rax);
3027 const Register RBX = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
3028 const Register RCX = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3029 const Register RDX = LP64_ONLY(rscratch1) NOT_LP64(rdx);
3030
3031 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3032
3033 if (JvmtiExport::can_post_field_modification()) {
3034 // Check to see if a field modification watch has been set before
3035 // we take the time to call into the VM.
3036 Label L1;
3037 assert_different_registers(cache, index, rax);
3038 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3039 __ testl(rax, rax);
3040 __ jcc(Assembler::zero, L1);
3041
3042 __ get_cache_and_index_at_bcp(robj, RDX, 1);
3043
3044
3045 if (is_static) {
3046 // Life is simple. Null out the object pointer.
3047 __ xorl(RBX, RBX);
3048
3049 } else {
3050 // Life is harder. The stack holds the value on top, followed by
3051 // the object. We don't know the size of the value, though; it
3052 // could be one or two words depending on its type. As a result,
3053 // we must find the type to determine where the object is.
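// For a one-word value the object is one slot below tos, for a two-word
// value (long/double) it is two slots below; the at_tos_p1()/at_tos_p2()
// (64-bit) and expr_offset_in_bytes(1)/(2) (32-bit) addressing below
// encodes exactly this.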
3054 #ifndef _LP64
3055 Label two_word, valsize_known;
3056 #endif
3057 __ movl(RCX, Address(robj, RDX,
3058 Address::times_ptr,
3059 in_bytes(cp_base_offset +
3060 ConstantPoolCacheEntry::flags_offset())));
3061 NOT_LP64(__ mov(rbx, rsp));
3062 __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
3063
3064 // Make sure we don't need to mask rcx after the above shift
3065 ConstantPoolCacheEntry::verify_tos_state_shift();
3066 #ifdef _LP64
3067 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
3068 __ cmpl(c_rarg3, ltos);
3069 __ cmovptr(Assembler::equal,
3070 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
3071 __ cmpl(c_rarg3, dtos);
3072 __ cmovptr(Assembler::equal,
3073 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
3074 #else
3075 __ cmpl(rcx, ltos);
3076 __ jccb(Assembler::equal, two_word);
3077 __ cmpl(rcx, dtos);
3078 __ jccb(Assembler::equal, two_word);
3079 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3080 __ jmpb(valsize_known);
3081
3082 __ bind(two_word);
3083 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3084
3085 __ bind(valsize_known);
3086 // setup object pointer
3087 __ movptr(rbx, Address(rbx, 0));
3088 #endif
3089 }
3090 // cache entry pointer
3091 __ addptr(robj, in_bytes(cp_base_offset));
3092 __ shll(RDX, LogBytesPerWord);
3093 __ addptr(robj, RDX);
3094 // object (tos)
3095 __ mov(RCX, rsp);
3096 // c_rarg1: object pointer set up above (NULL if static)
3097 // c_rarg2: cache entry pointer
3098 // c_rarg3: jvalue object on the stack
3099 __ call_VM(noreg,
3100 CAST_FROM_FN_PTR(address,
3101 InterpreterRuntime::post_field_modification),
3102 RBX, robj, RCX);
3103 __ get_cache_and_index_at_bcp(cache, index, 1);
3104 __ bind(L1);
3105 }
3106 }
3107
3108 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3109 transition(vtos, vtos);
3110
3111 const Register cache = rcx;
3112 const Register index = rdx;
3113 const Register obj = rcx;
3114 const Register off = rbx;
3115 const Register flags = rax;
3116
3117 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
3118 jvmti_post_field_mod(cache, index, is_static);
3119 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
3120
3121 // [jk] not needed currently
3122 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3123 // Assembler::StoreStore));
3124
3125 Label notVolatile, Done;
3126 __ movl(rdx, flags);
3127 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3128 __ andl(rdx, 0x1);
3129
3130 // Check for volatile store
3131 __ testl(rdx, rdx);
3132 __ jcc(Assembler::zero, notVolatile);
3133
3134 putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
3135 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3136 Assembler::StoreStore));
3137 __ jmp(Done);
3138 __ bind(notVolatile);
3139
3140 putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
3141
3142 __ bind(Done);
3143 }
3144
3145 void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
3146 Register obj, Register off, Register flags) {
3147
3148 // field addresses
3149 const Address field(obj, off, Address::times_1, 0*wordSize);
3150 NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3151
3152 Label notByte, notBool, notInt, notShort, notChar,
3153 notLong, notFloat, notObj;
3154 Label Done;
3155
3156 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3157
3158 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3159
3160 assert(btos == 0, "change code, btos != 0");
3161 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3162 __ jcc(Assembler::notZero, notByte);
3163
3164 // btos
3165 {
3166 __ pop(btos);
3167 if (!is_static) pop_and_check_object(obj);
3168 __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3169 if (!is_static && rc == may_rewrite) {
3170 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3171 }
3172 __ jmp(Done);
3173 }
3174
3175 __ bind(notByte);
3176 __ cmpl(flags, ztos);
3177 __ jcc(Assembler::notEqual, notBool);
3178
3179 // ztos
3180 {
3181 __ pop(ztos);
3182 if (!is_static) pop_and_check_object(obj);
3183 __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3184 if (!is_static && rc == may_rewrite) {
3185 patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
3186 }
3187 __ jmp(Done);
3188 }
3189
3190 __ bind(notBool);
3191 __ cmpl(flags, atos);
3192 __ jcc(Assembler::notEqual, notObj);
3193
3194 // atos
3195 {
3196 __ pop(atos);
3197 if (!is_static) pop_and_check_object(obj);
3198 // Store into the field
3199 do_oop_store(_masm, field, rax);
3200 if (!is_static && rc == may_rewrite) {
3201 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3202 }
3203 __ jmp(Done);
3204 }
3205
3206 __ bind(notObj);
3207 __ cmpl(flags, itos);
3208 __ jcc(Assembler::notEqual, notInt);
3209
3210 // itos
3211 {
3212 __ pop(itos);
3213 if (!is_static) pop_and_check_object(obj);
3214 __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3215 if (!is_static && rc == may_rewrite) {
3216 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3217 }
3218 __ jmp(Done);
3219 }
3220
3221 __ bind(notInt);
3222 __ cmpl(flags, ctos);
3223 __ jcc(Assembler::notEqual, notChar);
3224
3225 // ctos
3226 {
3227 __ pop(ctos);
3228 if (!is_static) pop_and_check_object(obj);
3229 __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3230 if (!is_static && rc == may_rewrite) {
3231 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3232 }
3233 __ jmp(Done);
3234 }
3235
3236 __ bind(notChar);
3237 __ cmpl(flags, stos);
3238 __ jcc(Assembler::notEqual, notShort);
3239
3240 // stos
3241 {
3242 __ pop(stos);
3243 if (!is_static) pop_and_check_object(obj);
3244 __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3245 if (!is_static && rc == may_rewrite) {
3246 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3247 }
3248 __ jmp(Done);
3249 }
3250
3251 __ bind(notShort);
3252 __ cmpl(flags, ltos);
3253 __ jcc(Assembler::notEqual, notLong);
3254
3255 // ltos
3256 {
3257 __ pop(ltos);
3258 if (!is_static) pop_and_check_object(obj);
3259 // MO_RELAXED: generate atomic store for the case of volatile field (important for x86_32)
3260 __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos*/, noreg, noreg);
3261 #ifdef _LP64
3262 if (!is_static && rc == may_rewrite) {
3263 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3264 }
3265 #endif // _LP64
3266 __ jmp(Done);
3267 }
3268
3269 __ bind(notLong);
3270 __ cmpl(flags, ftos);
3271 __ jcc(Assembler::notEqual, notFloat);
3272
3273 // ftos
3274 {
3275 __ pop(ftos);
3276 if (!is_static) pop_and_check_object(obj);
3277 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
3278 if (!is_static && rc == may_rewrite) {
3279 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3280 }
3281 __ jmp(Done);
3282 }
3283
3284 __ bind(notFloat);
3285 #ifdef ASSERT
3286 Label notDouble;
3287 __ cmpl(flags, dtos);
3288 __ jcc(Assembler::notEqual, notDouble);
3289 #endif
3290
3291 // dtos
3292 {
3293 __ pop(dtos);
3294 if (!is_static) pop_and_check_object(obj);
3295 // MO_RELAXED: even for a volatile field this adds no extra work for the underlying implementation
3296 __ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg);
3297 if (!is_static && rc == may_rewrite) {
3298 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3299 }
3300 }
3301
3302 #ifdef ASSERT
3303 __ jmp(Done);
3304
3305 __ bind(notDouble);
3306 __ stop("Bad state");
3307 #endif
3308
3309 __ bind(Done);
3310 }
3311
3312 void TemplateTable::putfield(int byte_no) {
3313 putfield_or_static(byte_no, false);
3314 }
3315
3316 void TemplateTable::nofast_putfield(int byte_no) {
3317 putfield_or_static(byte_no, false, may_not_rewrite);
3318 }
3319
3320 void TemplateTable::putstatic(int byte_no) {
3321 putfield_or_static(byte_no, true);
3322 }
3323
3324 void TemplateTable::jvmti_post_fast_field_mod() {
3325
3326 const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3327
3328 if (JvmtiExport::can_post_field_modification()) {
3329 // Check to see if a field modification watch has been set before
3330 // we take the time to call into the VM.
3331 Label L2;
3332 __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3333 __ testl(scratch, scratch);
3334 __ jcc(Assembler::zero, L2);
3335 __ pop_ptr(rbx); // copy the object pointer from tos
3336 __ verify_oop(rbx);
3337 __ push_ptr(rbx); // put the object pointer back on tos
3338 // Save tos values before call_VM() clobbers them. Since we have
3339 // to do it for every data type, we use the saved values as the
3340 // jvalue object.
3341 switch (bytecode()) { // load values into the jvalue object
3342 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3343 case Bytecodes::_fast_bputfield: // fall through
3344 case Bytecodes::_fast_zputfield: // fall through
3345 case Bytecodes::_fast_sputfield: // fall through
3346 case Bytecodes::_fast_cputfield: // fall through
3347 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3348 case Bytecodes::_fast_dputfield: __ push(dtos); break;
3349 case Bytecodes::_fast_fputfield: __ push(ftos); break;
3350 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3351
3352 default:
3353 ShouldNotReachHere();
3354 }
3355 __ mov(scratch, rsp); // points to jvalue on the stack
3356 // access constant pool cache entry
3357 LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
3358 NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
3359 __ verify_oop(rbx);
3360 // rbx: object pointer copied above
3361 // c_rarg2: cache entry pointer
3362 // c_rarg3: jvalue object on the stack
3363 LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3364 NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3365
3366 switch (bytecode()) { // restore tos values
3367 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3368 case Bytecodes::_fast_bputfield: // fall through
3369 case Bytecodes::_fast_zputfield: // fall through
3370 case Bytecodes::_fast_sputfield: // fall through
3371 case Bytecodes::_fast_cputfield: // fall through
3372 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3373 case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3374 case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3375 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3376 default: break;
3377 }
3378 __ bind(L2);
3379 }
3380 }
3381
3382 void TemplateTable::fast_storefield(TosState state) {
3383 transition(state, vtos);
3384
3385 ByteSize base = ConstantPoolCache::base_offset();
3386
3387 jvmti_post_fast_field_mod();
3388
3389 // access constant pool cache
3390 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3391
3392 // Test for volatile with rdx; note that rdx is the tos register for lputfield.
3393 __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3394 in_bytes(base +
3395 ConstantPoolCacheEntry::flags_offset())));
3396
3397 // replace index with field offset from cache entry
3398 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3399 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3400
3401 // [jk] not needed currently
3402 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3403 // Assembler::StoreStore));
3404
3405 Label notVolatile, Done;
3406 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3407 __ andl(rdx, 0x1);
3408
3409 // Get object from stack
3410 pop_and_check_object(rcx);
3411
3412 // field address
3413 const Address field(rcx, rbx, Address::times_1);
3414
3415 // Check for volatile store
3416 __ testl(rdx, rdx);
3417 __ jcc(Assembler::zero, notVolatile);
3418
3419 fast_storefield_helper(field, rax);
3420 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3421 Assembler::StoreStore));
3422 __ jmp(Done);
3423 __ bind(notVolatile);
3424
3425 fast_storefield_helper(field, rax);
3426
3427 __ bind(Done);
3428 }
3429
3430 void TemplateTable::fast_storefield_helper(Address field, Register rax) {
3431
3432 // access field
3433 switch (bytecode()) {
3434 case Bytecodes::_fast_aputfield:
3435 do_oop_store(_masm, field, rax);
3436 break;
3437 case Bytecodes::_fast_lputfield:
3438 #ifdef _LP64
3439 __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
3440 #else
3441 __ stop("should not be rewritten");
3442 #endif
3443 break;
3444 case Bytecodes::_fast_iputfield:
3445 __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3446 break;
3447 case Bytecodes::_fast_zputfield:
3448 __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3449 break;
3450 case Bytecodes::_fast_bputfield:
3451 __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3452 break;
3453 case Bytecodes::_fast_sputfield:
3454 __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3455 break;
3456 case Bytecodes::_fast_cputfield:
3457 __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3458 break;
3459 case Bytecodes::_fast_fputfield:
3460 __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos*/, noreg, noreg);
3461 break;
3462 case Bytecodes::_fast_dputfield:
3463 __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos*/, noreg, noreg);
3464 break;
3465 default:
3466 ShouldNotReachHere();
3467 }
3468 }
3469
3470 void TemplateTable::fast_accessfield(TosState state) {
3471 transition(atos, state);
3472
3473 // Do the JVMTI work here to avoid disturbing the register state below
3474 if (JvmtiExport::can_post_field_access()) {
3475 // Check to see if a field access watch has been set before we
3476 // take the time to call into the VM.
3477 Label L1;
3478 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3479 __ testl(rcx, rcx);
3480 __ jcc(Assembler::zero, L1);
3481 // access constant pool cache entry
3482 LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
3483 NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
3484 __ verify_oop(rax);
3485 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
3486 LP64_ONLY(__ mov(c_rarg1, rax));
3487 // c_rarg1: object pointer copied above
3488 // c_rarg2: cache entry pointer
3489 LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3490 NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3491 __ pop_ptr(rax); // restore object pointer
3492 __ bind(L1);
3493 }
3494
3495 // access constant pool cache
3496 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3497 // replace index with field offset from cache entry
3498 // [jk] not needed currently
3499 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
3500 // in_bytes(ConstantPoolCache::base_offset() +
3501 // ConstantPoolCacheEntry::flags_offset())));
3502 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3503 // __ andl(rdx, 0x1);
3504 //
3505 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3506 in_bytes(ConstantPoolCache::base_offset() +
3507 ConstantPoolCacheEntry::f2_offset())));
3508
3509 // rax: object
3510 __ verify_oop(rax);
3511 __ null_check(rax);
3512 Address field(rax, rbx, Address::times_1);
3513
3514 // access field
3515 switch (bytecode()) {
3516 case Bytecodes::_fast_agetfield:
3517 do_oop_load(_masm, field, rax);
3518 __ verify_oop(rax);
3519 break;
3520 case Bytecodes::_fast_lgetfield:
3521 #ifdef _LP64
3522 __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
3523 #else
3524 __ stop("should not be rewritten");
3525 #endif
3526 break;
3527 case Bytecodes::_fast_igetfield:
3528 __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3529 break;
3530 case Bytecodes::_fast_bgetfield:
3531 __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3532 break;
3533 case Bytecodes::_fast_sgetfield:
3534 __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3535 break;
3536 case Bytecodes::_fast_cgetfield:
3537 __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3538 break;
3539 case Bytecodes::_fast_fgetfield:
3540 __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3541 break;
3542 case Bytecodes::_fast_dgetfield:
3543 __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3544 break;
3545 default:
3546 ShouldNotReachHere();
3547 }
3548 // [jk] not needed currently
3549 // Label notVolatile;
3550 // __ testl(rdx, rdx);
3551 // __ jcc(Assembler::zero, notVolatile);
3552 // __ membar(Assembler::LoadLoad);
3553 // __ bind(notVolatile);
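  // Note: the LoadLoad barrier for volatile loads (sketched above) is elided
  // here because x86 (TSO) does not reorder loads with other loads.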
}

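// fast_xaccess implements the fused aload_0 + fast getfield bytecodes
// (_fast_iaccess_0, _fast_aaccess_0, _fast_faccess_0): the receiver is
// local 0 and the cache index is read at bcp offset 2, past the embedded
// aload_0.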
void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  // get receiver
  __ movptr(rax, aaddress(0));
  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rdx, 2);
  __ movptr(rbx,
            Address(rcx, rdx, Address::times_ptr,
                    in_bytes(ConstantPoolCache::base_offset() +
                             ConstantPoolCacheEntry::f2_offset())));
  // make sure exception is reported in correct bcp range (getfield is
  // next instruction)
  __ increment(rbcp);
  __ null_check(rax);
  const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
  switch (state) {
  case itos:
    __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
    break;
  case atos:
    do_oop_load(_masm, field, rax);
    __ verify_oop(rax);
    break;
  case ftos:
    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }

  // [jk] not needed currently
  // Label notVolatile;
  // __ movl(rdx, Address(rcx, rdx, Address::times_8,
  //                      in_bytes(ConstantPoolCache::base_offset() +
  //                               ConstantPoolCacheEntry::flags_offset())));
  // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  // __ testl(rdx, 0x1);
  // __ jcc(Assembler::zero, notVolatile);
  // __ membar(Assembler::LoadLoad);
  // __ bind(notVolatile);

  __ decrement(rbcp);
}

//-----------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver = (recv != noreg);
  const bool save_flags = (flags != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
  assert(flags == noreg || flags == rdx, "");
  assert(recv == noreg || recv == rcx, "");

  // setup registers & access constant pool cache
  if (recv == noreg)  recv  = rcx;
  if (flags == noreg) flags = rdx;
  assert_different_registers(method, index, recv, flags);

  // save 'interpreter return address'
  __ save_bcp();

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  // maybe push appendix to arguments (just before return address)
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
    __ jcc(Assembler::zero, L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push(rbx);
    __ mov(rbx, index);
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(index, rbx);
    __ pop(rbx);
    __ push(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  // Note: no return address pushed yet
  if (load_receiver) {
    __ movl(recv, flags);
    __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
    const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
    const int receiver_is_at_end      = -1;  // back off one slot to get receiver
    Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
    __ movptr(recv, recv_addr);
    __ verify_oop(recv);
  }

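  // Registers are scarce here, so the flags are stashed in rbcp across the
  // return-entry load below; rbcp itself can be reconstructed afterwards
  // via restore_bcp().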
  if (save_flags) {
    __ movl(rbcp, flags);
  }

  // compute return type
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    ExternalAddress table(table_addr);
    LP64_ONLY(__ lea(rscratch1, table));
    LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr)));
    NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))));
  }

  // push return address
  __ push(flags);

  // Restore the flags value (stashed in rbcp above) and reload rbcp
  // (rsi on 32-bit, r13 on 64-bit) for later null checks.
  if (save_flags) {
    __ movl(flags, rbcp);
    __ restore_bcp();
  }
}

void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags) {
  // Uses temporary registers rax, rdx
  assert_different_registers(index, recv, rax, rdx);
  assert(index == rbx, "");
  assert(recv == rcx, "");

  // Test for an invoke of a final method
  Label notFinal;
  __ movl(rax, flags);
  __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
  __ jcc(Assembler::zero, notFinal);

  const Register method = index;  // method must be rbx
  assert(method == rbx,
         "Method* must be rbx for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*

  // It's final, need a null check here!
  __ null_check(recv);

  // profile this call
  __ profile_final_call(rax);
  __ profile_arguments_type(rax, method, rbcp, true);

  __ jump_from_interpreted(method, rax);

  __ bind(notFinal);

  // get receiver klass
  __ null_check(recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(rax, recv);

  // profile this call
  __ profile_virtual_call(rax, rlocals, rdx);
  // get target Method* & entry point
  __ lookup_virtual_method(rax, index, method);
  __ profile_called_method(method, rdx, rbcp);

  __ profile_arguments_type(rdx, method, rbcp, true);
  __ jump_from_interpreted(method, rdx);
}

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  prepare_invoke(byte_no,
                 rbx,        // method or vtable index
                 noreg,      // unused itable index
                 rcx, rdx);  // recv, flags

  // rbx: index
  // rcx: receiver
  // rdx: flags

  invokevirtual_helper(rbx, rcx, rdx);
}

void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
                 rcx);  // get receiver also for null check
  __ verify_oop(rcx);
  __ null_check(rcx);
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, rbcp, false);
  __ jump_from_interpreted(rbx, rax);
}

void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx);  // get f1 Method*
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, rbcp, false);
  __ jump_from_interpreted(rbx, rax);
}


void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  __ stop("fast_invokevfinal not used on x86");
}


void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 Method*
                 rcx, rdx);          // recv, flags

  // rax: reference klass (from f1) if interface method
  // rbx: method (from f2)
  // rcx: receiver
  // rdx: flags

  // First check for Object case, then private interface method,
  // then regular interface method.

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCache.cpp for details.
  Label notObjectMethod;
  __ movl(rlocals, rdx);
  __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
  __ jcc(Assembler::zero, notObjectMethod);
  invokevirtual_helper(rbx, rcx, rdx);
  // no return from above
  __ bind(notObjectMethod);

  Label no_such_interface;  // for receiver subtype check
  Register recvKlass;       // used for exception processing

  // Check for private method invocation - indicated by vfinal
  Label notVFinal;
  __ movl(rlocals, rdx);
  __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
  __ jcc(Assembler::zero, notVFinal);

  // Get receiver klass into rlocals - also a null check
  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
  __ load_klass(rlocals, rcx);

  Label subtype;
  __ check_klass_subtype(rlocals, rax, rbcp, subtype);
  // If we get here the typecheck failed
  recvKlass = rdx;
  __ mov(recvKlass, rlocals);  // shuffle receiver class for exception use
  __ jmp(no_such_interface);

  __ bind(subtype);

  // do the call - rbx is actually the method to call

  __ profile_final_call(rdx);
  __ profile_arguments_type(rdx, rbx, rbcp, true);

  __ jump_from_interpreted(rbx, rdx);
  // no return from above
  __ bind(notVFinal);

  // Get receiver klass into rdx - also a null check
  __ restore_locals();  // restore r14
  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
  __ load_klass(rdx, rcx);

  Label no_such_method;

  // Preserve method for throw_AbstractMethodErrorVerbose.
  __ mov(rcx, rbx);
  // Receiver subtype check against REFC.
  // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rdx, rax, noreg,
                             // outputs: scan temp. reg, scan temp. reg
                             rbcp, rlocals,
                             no_such_interface,
                             /*return_method=*/false);

  // profile this call
  __ restore_bcp();  // rbcp was destroyed by receiver type check
  __ profile_virtual_call(rdx, rbcp, rlocals);

  // Get declaring interface class from method, and itable index
  __ movptr(rax, Address(rbx, Method::const_offset()));
  __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
  __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
  __ movl(rbx, Address(rbx, Method::itable_index_offset()));
  __ subl(rbx, Method::itable_index_max);
  __ negl(rbx);
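  // rbx now holds the decoded itable index (the field stores the index in
  // encoded form relative to Method::itable_index_max).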

  // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
  __ mov(rlocals, rdx);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rlocals, rax, rbx,
                             // outputs: method, scan temp. reg
                             rbx, rbcp,
                             no_such_interface);

  // rbx: Method* to call
  // rcx: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  //       interpreter entry point and a conditional jump to it in case of a null
  //       method.
  __ testptr(rbx, rbx);
  __ jcc(Assembler::zero, no_such_method);

  __ profile_called_method(rbx, rbcp, rdx);
  __ profile_arguments_type(rdx, rbx, rbcp, true);

  // do the call
  // rcx: receiver
  // rbx: Method*
  __ jump_from_interpreted(rbx, rdx);
  __ should_not_reach_here();

  // exception handling code follows...
  // note: must restore interpreter registers to canonical
  //       state for exception handling to work correctly!

  __ bind(no_such_method);
  // throw exception
  __ pop(rbx);          // pop return address (pushed by prepare_invoke)
  __ restore_bcp();     // rbcp must be correct for exception handler (was destroyed)
  __ restore_locals();  // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
#ifdef _LP64
  recvKlass = c_rarg1;
  Register method = c_rarg2;
  if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
  if (method != rcx)    { __ movq(method, rcx); }
#else
  recvKlass = rdx;
  Register method = rcx;
#endif
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
             recvKlass, method);
  // The call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);
  // throw exception
  __ pop(rbx);          // pop return address (pushed by prepare_invoke)
  __ restore_bcp();     // rbcp must be correct for exception handler (was destroyed)
  __ restore_locals();  // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
  LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
             recvKlass, rax);
  // The call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  const Register rbx_method = rbx;
  const Register rax_mtype  = rax;
  const Register rcx_recv   = rcx;
  const Register rdx_flags  = rdx;

  prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
  __ verify_method_ptr(rbx_method);
  __ verify_oop(rcx_recv);
  __ null_check(rcx_recv);

  // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
  // rbx: MH.invokeExact_MT method (from f2)

  // Note: rax_mtype is already pushed (if necessary) by prepare_invoke

  // FIXME: profile the LambdaForm also
  __ profile_final_call(rax);
  __ profile_arguments_type(rdx, rbx_method, rbcp, true);

  __ jump_from_interpreted(rbx_method, rdx);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register rbx_method   = rbx;
  const Register rax_callsite = rax;

  prepare_invoke(byte_no, rbx_method, rax_callsite);

  // rax: CallSite object (from cpool->resolved_references[f1])
  // rbx: MH.linkToCallSite method (from f2)

  // Note: rax_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rbcp);
  __ profile_arguments_type(rdx, rbx_method, rbcp, false);

  __ verify_oop(rax_callsite);

  __ jump_from_interpreted(rbx_method, rdx);
}

//-----------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);
  __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
  Label slow_case;
  Label slow_case_no_pop;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields

  __ get_cpool_and_tags(rcx, rax);

  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the
  // order in which the constant pool is updated (see ConstantPool::klass_at_put).
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, slow_case_no_pop);

  // get InstanceKlass
  __ load_resolved_klass_at_index(rcx, rdx, rcx);
  __ push(rcx);  // save the contents of the klass register for initializing the header

  // make sure klass is fully initialized & doesn't have a finalizer
  __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
  __ jcc(Assembler::notEqual, slow_case);

  // get instance_size in InstanceKlass (scaled to a count of bytes)
  __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
  // test to see if it has a finalizer or is malformed in some way
  __ testl(rdx, Klass::_lh_instance_slow_path_bit);
  __ jcc(Assembler::notZero, slow_case);

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //  Else If inline contiguous allocations are enabled:
  //    Try to allocate in eden.
  //    If fails due to heap end, go to slow path.
  //
  //  If TLAB is enabled OR inline contiguous is enabled:
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to slow path.

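  // For reference: a Java "new Foo()" compiles to new / dup / invokespecial
  // <init>, and this template implements only the allocation step, so the
  // object produced here has its fields zeroed but is not yet constructed.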
  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc();

  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
#ifndef _LP64
  if (UseTLAB || allow_shared_alloc) {
    __ get_thread(thread);
  }
#endif // _LP64

  if (UseTLAB) {
    __ tlab_allocate(thread, rax, rdx, 0, rcx, rbx, slow_case);
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ jmp(initialize_header);
    } else {
      // initialize both the header and fields
      __ jmp(initialize_object);
    }
  } else {
    // Allocation in the shared Eden, if allowed.
    //
    // rdx: instance size in bytes
    __ eden_allocate(thread, rax, rdx, 0, rbx, slow_case);
  }

  // If UseTLAB or allow_shared_alloc is true, the object was created above
  // and still needs to be initialized.  Otherwise, skip and go to the slow path.
  if (UseTLAB || allow_shared_alloc) {
    // The object's fields are initialized before its header.  If the object
    // size is zero, go directly to the header initialization.
    __ bind(initialize_object);
    __ decrement(rdx, sizeof(oopDesc));
    __ jcc(Assembler::zero, initialize_header);

    // Initialize topmost object field, divide rdx by 8, check if odd and
    // test if zero.
    __ xorl(rcx, rcx);              // use zero reg to clear memory (shorter code)
    __ shrl(rdx, LogBytesPerLong);  // divide by 2*oopSize and set carry flag if odd

    // rdx must have been a multiple of 8
#ifdef ASSERT
    // make sure rdx was a multiple of 8
    Label L;
    // Ignore partial flag stall after shrl() since it is debug VM
    __ jcc(Assembler::carryClear, L);
    __ stop("object size is not multiple of 2 - adjust this code");
    __ bind(L);
    // rdx must be > 0, no extra check needed here
#endif

    // initialize remaining object fields: rdx was a multiple of 8
    { Label loop;
      __ bind(loop);
      __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
      NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
      __ decrement(rdx);
      __ jcc(Assembler::notZero, loop);
    }

    // initialize object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      __ pop(rcx);  // get saved klass back in the register.
      __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rbx);
    } else {
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
                (intptr_t)markOopDesc::prototype());  // header
      __ pop(rcx);  // get saved klass back in the register.
    }
#ifdef _LP64
    __ xorl(rsi, rsi);             // use zero reg to clear memory (shorter code)
    __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
#endif
    __ store_klass(rax, rcx);  // klass

    {
      SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
      // Trigger dtrace event for fastpath
      __ push(atos);
      __ call_VM_leaf(
           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
      __ pop(atos);
    }

    __ jmp(done);
  }

  // slow case
  __ bind(slow_case);
  __ pop(rcx);  // restore stack pointer to what it was when we came in.
  __ bind(slow_case_no_pop);

  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
  Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);

  __ get_constant_pool(rarg1);
  __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
  __ verify_oop(rax);

  // continue
  __ bind(done);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  __ load_unsigned_byte(rarg1, at_bcp(1));
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          rarg1, rax);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
  Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);

  __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
  __ get_constant_pool(rarg1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          rarg1, rarg2, rax);
}
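
// For reference: "new int[n]" compiles to newarray with a primitive type
// tag operand, while "new String[n]" compiles to anewarray with a
// constant-pool class index; both receive the array length in itos.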

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
  __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
}

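// checkcast leaves the operand on the stack and throws ClassCastException
// on failure; instanceof consumes it and pushes 0 or 1.  Both share the
// same quickening and subtype-check machinery below.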
void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax);  // object is in rax
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx);  // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);  // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  Array<u1>::base_offset_in_bytes()),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);
  __ push(atos);  // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));

  // vm_result_2 has metadata result
#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(rax, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(rax, r15_thread);
#endif

  __ pop_ptr(rdx);  // restore receiver
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rbx
  __ bind(quicked);
  __ mov(rdx, rax);  // Save object in rdx; rax needed for subtype check
  __ load_resolved_klass_at_index(rcx, rbx, rax);

  __ bind(resolved);
  __ load_klass(rbx, rdx);

  // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  __ push_ptr(rdx);
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(rax, rdx);  // Restore object from rdx

  // Collect counts on whether this check-cast sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null);  // same as 'done'
  }
  __ bind(done);
}

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx);  // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);  // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  Array<u1>::base_offset_in_bytes()),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);

  __ push(atos);  // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result

#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(rax, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(rax, r15_thread);
#endif

  __ pop_ptr(rdx);  // restore receiver
  __ verify_oop(rdx);
  __ load_klass(rdx, rdx);
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rdx
  __ bind(quicked);
  __ load_klass(rdx, rax);
  __ load_resolved_klass_at_index(rcx, rbx, rax);

  __ bind(resolved);

  // Generate subtype check.  Blows rcx, rdi.
  // Superklass in rax.  Subklass in rdx.
  __ gen_subtype_check(rdx, ok_is_subtype);

  // Come here on failure
  __ xorl(rax, rax);
  __ jmpb(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ movl(rax, 1);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null);  // same as 'done'
  }
  __ bind(done);
  // rax = 0: obj == NULL or obj is not an instanceof the specified klass
  // rax = 1: obj != NULL and obj is an instanceof the specified klass
}


//----------------------------------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);

  // get the unpatched byte code
  __ get_method(rarg);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             rarg, rbcp);
  __ mov(rbx, rax);  // why?

  // post the breakpoint event
  __ get_method(rarg);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             rarg, rbcp);

  // complete the execution of original bytecode
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(rax);
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- rsp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rbp    ] <--- rbp
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  __ resolve(IS_NOT_NULL, rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Label allocated;

  Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
  Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);

  // initialize entry pointer
  __ xorl(rmon, rmon);  // points to free slot or NULL

  // find a free slot in the monitor block (result in rmon)
  {
    Label entry, loop, exit;
    __ movptr(rtop, monitor_block_top);  // points to current entry,
                                         // starting with top-most entry
    __ lea(rbot, monitor_block_bot);     // points to word before bottom
                                         // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is used
    __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
    // if not used then remember entry in rmon
    __ cmovptr(Assembler::equal, rmon, rtop);  // cmov => cmovptr
    // check if current entry is for same object
    __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jccb(Assembler::equal, exit);
    // otherwise advance to next entry
    __ addptr(rtop, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(rtop, rbot);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
    __ bind(exit);
  }

  __ testptr(rmon, rmon);                 // check if a slot has been found
  __ jcc(Assembler::notZero, allocated);  // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers           // rsp: old expression stack top
    __ movptr(rmon, monitor_block_bot);  // rmon: old expression stack bottom
    __ subptr(rsp, entry_size);          // move expression stack top
    __ subptr(rmon, entry_size);         // move expression stack bottom
    __ mov(rtop, rsp);                   // set start value for copy loop
    __ movptr(monitor_block_bot, rmon);  // set new monitor block bottom
    __ jmp(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ movptr(rbot, Address(rtop, entry_size));  // load expression stack
                                                 // word from old location
    __ movptr(Address(rtop, 0), rbot);           // and store it at new location
    __ addptr(rtop, wordSize);                   // advance to next word
    __ bind(entry);
    __ cmpptr(rtop, rmon);              // check if bottom reached
    __ jcc(Assembler::notEqual, loop);  // if not at bottom then
                                        // copy next word
  }

  // call run-time routine
  // rmon: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(rbcp);

  // store object
  __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ lock_object(rmon);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp();  // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented.  Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);
}
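
// For reference: javac compiles "synchronized (obj) { ... }" to a
// monitorenter/monitorexit pair plus an exception handler that also
// unlocks, which is why unstructured unlocking is treated as an error in
// monitorexit below.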

void TemplateTable::monitorexit() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  __ resolve(IS_NOT_NULL, rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ movptr(rtop, monitor_block_top);  // points to current entry,
                                         // starting with top-most entry
    __ lea(rbot, monitor_block_bot);     // points to word before bottom
                                         // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jcc(Assembler::equal, found);
    // otherwise advance to next entry
    __ addptr(rtop, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(rtop, rbot);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
  }

  // error handling.  Unlocking was not block-structured
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);
  __ push_ptr(rax);  // make sure object is on stack (contract with oopMaps)
  __ unlock_object(rtop);
  __ pop_ptr(rax);   // discard object
}

// Wide instructions
void TemplateTable::wide() {
  transition(vtos, vtos);
  __ load_unsigned_byte(rbx, at_bcp(1));
  ExternalAddress wtable((address)Interpreter::_wentry_point);
  __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
  // Note: the rbcp increment step is part of the individual wide bytecode implementations
}
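
// For reference: wide prefixes bytecodes such as iinc and the local
// load/stores, widening their operands (e.g. "wide iinc" takes a 16-bit
// local index and a 16-bit signed increment); the table indexed above
// holds one entry point per widenable bytecode.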

// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
  __ load_unsigned_byte(rax, at_bcp(3));  // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordSize
  // the latter wordSize to point to the beginning of the array.
  __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
  __ load_unsigned_byte(rbx, at_bcp(3));
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
}
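
// For reference: "new int[a][b]" compiles to multianewarray with a
// constant-pool index for the array class and a dimension count of 2;
// the dimension sizes arrive on the expression stack and are discarded
// after the runtime call above.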