/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

// Platform-dependent initialization
void TemplateTable::pd_initialize() {
  // No x86 specific initialization
}

// Address Computation: local variables
static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

#ifndef _LP64
static inline Address haddress(int n) {
  return iaddress(n + 0);
}
#endif
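
// Note: category-2 values (long/double) occupy two consecutive local slots.
// Locals are addressed at decreasing addresses from rlocals (see the negptr
// in locals_index below), so laddress == iaddress(n + 1) picks the value's
// low word at the higher slot index; on 32-bit builds the high word is read
// separately through haddress.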

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::times_ptr);
}

static inline Address laddress(Register r) {
  return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
}

#ifndef _LP64
static inline Address haddress(Register r) {
  return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}
#endif

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}


// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may differ from rsp()
// (it does not for category 1 values).
static inline Address at_tos() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
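
// Note: the mapping is deliberately inverted. Branch templates emit a jump
// that is taken when the Java condition does *not* hold and fall through
// into the taken-branch code, so TemplateTable::equal maps to
// Assembler::notEqual, less to greaterEqual, and so on.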


// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by dst.
// If val == noreg this means store a NULL.


static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators = 0) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  __ store_heap_oop(dst, val, rdx, rbx, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators = 0) {
  __ load_heap_oop(dst, src, rdx, rbx, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done); // don't patch
    }
    break;
    default:
      assert(byte_no == -1, "sanity");
      // the pair bytecodes have already done the load.
      if (load_bc_into_bc_reg) {
        __ movl(bc_reg, bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
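
// In short: patch_bytecode overwrites the current bytecode in the instruction
// stream with its quickened form, so later executions dispatch straight to
// the fast template. The direct rewrite is skipped while a _breakpoint opcode
// is present; in that case the breakpoint table, which holds the original
// bytecode, performs the substitution instead.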
// Individual instructions


void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
#ifndef _LP64
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
#endif
}



void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if (UseSSE >= 1) {
    static float one = 1.0f, two = 2.0f;
    switch (value) {
      case 0:
        __ xorps(xmm0, xmm0);
        break;
      case 1:
        __ movflt(xmm0, ExternalAddress((address) &one));
        break;
      case 2:
        __ movflt(xmm0, ExternalAddress((address) &two));
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    if        (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
    } else                 { ShouldNotReachHere();
    }
#endif // _LP64
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if (UseSSE >= 2) {
    static double one = 1.0;
    switch (value) {
      case 0:
        __ xorpd(xmm0, xmm0);
        break;
      case 1:
        __ movdbl(xmm0, ExternalAddress((address) &one));
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    if        (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else                 { ShouldNotReachHere();
    }
#endif
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
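
// Note: the two operand bytes of sipush are stored big-endian in the
// bytecode stream. load_unsigned_short fetches them in memory order,
// bswapl moves them into the upper half of the register, and the
// arithmetic right shift by 16 repositions and sign-extends the 16-bit
// immediate in one step, i.e. rax = (int16_t)((bcp[1] << 8) | bcp[2]).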

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);

  __ movl(rarg, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);

  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);

  // ftos
  __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jccb(Assembler::notEqual, notInt);

  // itos
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);

  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testptr(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);
  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;
    ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
    __ movptr(tmp, null_sentinel);
    __ cmpoop(tmp, result);
    __ jccb(Assembler::notEqual, notNull);
    __ xorptr(result, result); // NULL object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
  __ cmpl(rdx, JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, notDouble);

  // dtos
  __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);

  __ jmp(Done);
  __ bind(notDouble);
  __ cmpl(rdx, JVM_CONSTANT_Long);
  __ jccb(Assembler::notEqual, notLong);

  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
  __ push(ltos);
  __ jmp(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done) {
  const Register obj = rax;
  const Register off = rbx;
  const Register flags = rcx;
  const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  __ movl(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(flags, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(flags, r15_thread);
#endif
  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ movl(off, flags);
  __ andl(off, ConstantPoolCacheEntry::field_index_mask);
  const Address field(obj, off, Address::times_1, 0 * wordSize);

  // What sort of thing are we loading?
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);

  switch (bytecode()) {
    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpl(flags, itos);
      __ jcc(Assembler::notEqual, notInt);
      // itos
      __ movl(rax, field);
      __ push(itos);
      __ jmp(Done);

      __ bind(notInt);
      __ cmpl(flags, ftos);
      __ jcc(Assembler::notEqual, notFloat);
      // ftos
      __ load_float(field);
      __ push(ftos);
      __ jmp(Done);

      __ bind(notFloat);
      __ cmpl(flags, stos);
      __ jcc(Assembler::notEqual, notShort);
      // stos
      __ load_signed_short(rax, field);
      __ push(stos);
      __ jmp(Done);

      __ bind(notShort);
      __ cmpl(flags, btos);
      __ jcc(Assembler::notEqual, notByte);
      // btos
      __ load_signed_byte(rax, field);
      __ push(btos);
      __ jmp(Done);

      __ bind(notByte);
      __ cmpl(flags, ctos);
      __ jcc(Assembler::notEqual, notChar);
      // ctos
      __ load_unsigned_short(rax, field);
      __ push(ctos);
      __ jmp(Done);

      __ bind(notChar);
      __ cmpl(flags, ztos);
      __ jcc(Assembler::notEqual, notBool);
      // ztos
      __ load_signed_byte(rax, field);
      __ push(ztos);
      __ jmp(Done);

      __ bind(notBool);
      break;
    }

    case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpl(flags, ltos);
      __ jcc(Assembler::notEqual, notLong);
      // ltos
      // Loading high word first because movptr clobbers rax
      NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
      __ movptr(rax, field);
      __ push(ltos);
      __ jmp(Done);

      __ bind(notLong);
      __ cmpl(flags, dtos);
      __ jcc(Assembler::notEqual, notDouble);
      // dtos
      __ load_double(field);
      __ push(dtos);
      __ jmp(Done);

      __ bind(notDouble);
      break;
    }

    default:
      ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
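
// The index is negated because locals live below rlocals at decreasing
// addresses; the (now negative) value is then used directly as a scaled
// index by iaddress(Register) and friends.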

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to fast_iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);

    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}
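
// Same big-endian decode as sipush, except the wide index is unsigned:
// shrl zero-extends where sipush's sarl sign-extends. The result,
// (uint16_t)((bcp[2] << 8) | bcp[3]), is again negated for the
// downward-growing locals area.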

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  Label skip;
  __ jccb(Assembler::below, skip);
  // Pass array to create more detailed exceptions.
  __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
  __ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
  __ bind(skip);
}
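
// Note: Assembler::below is an *unsigned* comparison, so a negative index
// (a huge value when viewed as unsigned) fails the index < length check as
// well; a single branch covers both out-of-range cases.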

void TemplateTable::iaload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_INT)),
                    noreg, noreg);
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  NOT_LP64(__ mov(rbx, rax));
  // rbx: index
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
                    Address(rdx, rbx, Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_LONG)),
                    noreg, noreg);
}



void TemplateTable::faload() {
  transition(itos, ftos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
                    Address(rdx, rax,
                            Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                    noreg, noreg);
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
                    Address(rdx, rax,
                            Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                    noreg, noreg);
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  do_oop_load(_masm,
              Address(rdx, rax,
                      UseCompressedOops ? Address::times_4 : Address::times_ptr,
                      arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
              rax,
              IS_ARRAY);
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                    noreg, noreg);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
                    noreg, noreg);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ load_float(faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ load_double(daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite the frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes are the most profitable to rewrite because they
  // require only a small amount of code.
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ store_float(faddress(rbx));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ store_double(daddress(rbx));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  NOT_LP64(__ pop_l(rax, rdx));
  LP64_ONLY(__ pop_l());
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}

void TemplateTable::wide_fstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_f(xmm0);
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
#else
  wide_istore();
#endif
}

void TemplateTable::wide_dstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_d(xmm0);
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
#else
  wide_lstore();
#endif
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_INT)),
                     rax, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx); // prefer index in rbx
  // rbx: index
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
                     Address(rcx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
                     noreg /* ltos */, noreg, noreg);
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 1 ? xmm0 : ST(0)
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                     noreg /* ftos */, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 2 ? xmm0 : ST(0)
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                     noreg /* dtos */, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_ptr,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check_without_pop(rdx, rcx); // kills rbx
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  // Move subklass into rbx
  __ load_klass(rbx, rax, tmp_load_klass);
  // Move superklass into rax
  __ load_klass(rax, rdx, tmp_load_klass);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  __ movl(rcx, at_tos_p1()); // index
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, rax, IS_ARRAY);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, rcx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}
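
// Note: storing a NULL can never violate the array's element type, so the
// null path above skips the subtype check entirely; it only records the
// null in the profile and performs the (barriered) store.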

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  __ load_klass(rcx, rdx, tmp_load_klass);
  __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ testl(rcx, diffbit);
  Label L_skip;
  __ jccb(Assembler::zero, L_skip);
  __ andl(rax, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_1,
                             arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                     rax, noreg, noreg);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_2,
                             arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                     rax, noreg, noreg);
}


void TemplateTable::sastore() {
  castore();
}
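
// short and char array elements are both 16 bits wide and are stored
// identically, so sastore simply reuses castore; only the loads differ
// in signedness.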

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ store_float(faddress(n));
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ store_double(daddress(n));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
    case add  : __ pop_i(rdx); __ addl (rax, rdx); break;
    case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
    case mul  : __ pop_i(rdx); __ imull(rax, rdx); break;
    case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
    case _or  : __ pop_i(rdx); __ orl  (rax, rdx); break;
    case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
    case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
    case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
    case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
    default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
#ifdef _LP64
  switch (op) {
    case add  : __ pop_l(rdx); __ addptr(rax, rdx); break;
    case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
    case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
    case _or  : __ pop_l(rdx); __ orptr (rax, rdx); break;
    case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
    default   : ShouldNotReachHere();
  }
#else
  __ pop_l(rbx, rcx);
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
#endif
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
#ifdef _LP64
  __ pop_l(rdx);
  __ imulq(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize); // take off temporaries
#endif
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize); // take off temporaries
#endif
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y == 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
#endif
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);    // get shift count
#ifdef _LP64
  __ pop_l(rax);        // get shift value
  __ shlq(rax);
#else
  __ pop_l(rax, rdx);   // get shift value
  __ lshl(rdx, rax);
#endif
}
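
// Note: x86 variable-count shift instructions take their count implicitly
// in CL, which is why the shift templates first move the count into rcx.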

void TemplateTable::lshr() {
  transition(itos, ltos);
#ifdef _LP64
  __ movl(rcx, rax);    // get shift count
  __ pop_l(rax);        // get shift value
  __ sarq(rax);
#else
  __ mov(rcx, rax);     // get shift count
  __ pop_l(rax, rdx);   // get shift value
  __ lshr(rdx, rax, true);
#endif
}

void TemplateTable::lushr() {
  transition(itos, ltos);
#ifdef _LP64
  __ movl(rcx, rax);    // get shift count
  __ pop_l(rax);        // get shift value
  __ shrq(rax);
#else
  __ mov(rcx, rax);     // get shift count
  __ pop_l(rax, rdx);   // get shift value
  __ lshr(rdx, rax);
#endif
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  if (UseSSE >= 1) {
    switch (op) {
      case add:
        __ addss(xmm0, at_rsp());
        __ addptr(rsp, Interpreter::stackElementSize);
        break;
      case sub:
        __ movflt(xmm1, xmm0);
        __ pop_f(xmm0);
        __ subss(xmm0, xmm1);
        break;
      case mul:
        __ mulss(xmm0, at_rsp());
        __ addptr(rsp, Interpreter::stackElementSize);
        break;
      case div:
        __ movflt(xmm1, xmm0);
        __ pop_f(xmm0);
        __ divss(xmm0, xmm1);
        break;
      case rem:
        // On x86_64 platforms the SharedRuntime::frem method is called to perform the
        // modulo operation. The frem method calls the function
        // double fmod(double x, double y) in math.h. The documentation of fmod states:
        // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
        // (signalling or quiet) is returned.
        //
        // On x86_32 platforms the FPU is used to perform the modulo operation. The
        // reason is that on 32-bit Windows the sign of modulo operations diverges from
        // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f and not -0.0f).
        // The fprem instruction used on x86_32 is functionally equivalent to
        // SharedRuntime::frem in that it returns a NaN.
#ifdef _LP64
        __ movflt(xmm1, xmm0);
        __ pop_f(xmm0);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
#else
        __ push_f(xmm0);
        __ pop_f();
        __ fld_s(at_rsp());
        __ fremr(rax);
        __ f2ieee();
        __ pop(rax);  // pop second operand off the stack
        __ push_f();
        __ pop_f(xmm0);
#endif
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
      case add: __ fadd_s (at_rsp()); break;
      case sub: __ fsubr_s(at_rsp()); break;
      case mul: __ fmul_s (at_rsp()); break;
      case div: __ fdivr_s(at_rsp()); break;
      case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
      default : ShouldNotReachHere();
    }
    __ f2ieee();
    __ pop(rax);  // pop second operand off the stack
#endif // _LP64
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  if (UseSSE >= 2) {
    switch (op) {
      case add:
        __ addsd(xmm0, at_rsp());
        __ addptr(rsp, 2 * Interpreter::stackElementSize);
        break;
      case sub:
        __ movdbl(xmm1, xmm0);
        __ pop_d(xmm0);
        __ subsd(xmm0, xmm1);
        break;
      case mul:
        __ mulsd(xmm0, at_rsp());
        __ addptr(rsp, 2 * Interpreter::stackElementSize);
        break;
      case div:
        __ movdbl(xmm1, xmm0);
        __ pop_d(xmm0);
        __ divsd(xmm0, xmm1);
        break;
      case rem:
        // Similar to fop2(), the modulo operation is performed using the
        // SharedRuntime::drem method (on x86_64 platforms) or using the
        // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
#ifdef _LP64
        __ movdbl(xmm1, xmm0);
        __ pop_d(xmm0);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
#else
        __ push_d(xmm0);
        __ pop_d();
        __ fld_d(at_rsp());
        __ fremr(rax);
        __ d2ieee();
        __ pop(rax);
        __ pop(rdx);
        __ push_d();
        __ pop_d(xmm0);
#endif
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
      case add: __ fadd_d (at_rsp()); break;
      case sub: __ fsubr_d(at_rsp()); break;
      case mul: {
        Label L_strict;
        Label L_join;
        const Address access_flags(rcx, Method::access_flags_offset());
        __ get_method(rcx);
        __ movl(rcx, access_flags);
        __ testl(rcx, JVM_ACC_STRICT);
        __ jccb(Assembler::notZero, L_strict);
        __ fmul_d (at_rsp());
        __ jmpb(L_join);
        __ bind(L_strict);
        __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
        __ fmulp();
        __ fmul_d (at_rsp());
        __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
        __ fmulp();
        __ bind(L_join);
        break;
      }
      case div: {
        Label L_strict;
        Label L_join;
        const Address access_flags(rcx, Method::access_flags_offset());
        __ get_method(rcx);
        __ movl(rcx, access_flags);
        __ testl(rcx, JVM_ACC_STRICT);
        __ jccb(Assembler::notZero, L_strict);
        __ fdivr_d(at_rsp());
        __ jmp(L_join);
        __ bind(L_strict);
        __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
        __ fmul_d (at_rsp());
        __ fdivrp();
        __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
        __ fmulp();
        __ bind(L_join);
        break;
      }
      case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
      default : ShouldNotReachHere();
    }
    __ d2ieee();
    // Pop double precision number from rsp.
    __ pop(rax);
    __ pop(rdx);
#endif
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  LP64_ONLY(__ negq(rax));
  NOT_LP64(__ lneg(rdx, rax));
}

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit-aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value into the 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
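
// The pools below are twice the required size so that double_quadword() can
// round &pool[1] down to a 16-byte boundary and still have a full 16-byte
// operand inside the pool.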

// Buffer for 128-bit masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

void TemplateTable::fneg() {
  transition(ftos, ftos);
  if (UseSSE >= 1) {
    static jlong *float_signflip =
      double_quadword(&float_signflip_pool[1], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
    __ xorps(xmm0, ExternalAddress((address) float_signflip));
  } else {
    LP64_ONLY(ShouldNotReachHere());
    NOT_LP64(__ fchs());
  }
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  if (UseSSE >= 2) {
    static jlong *double_signflip =
      double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
    __ xorpd(xmm0, ExternalAddress((address) double_signflip));
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    __ fchs();
#endif
  }
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}

void TemplateTable::convert() {
#ifdef _LP64
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_in = itos; break;
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_l2d: tos_in = ltos; break;
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_f2d: tos_in = ftos; break;
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_d2l: // fall through
      case Bytecodes::_d2f: tos_in = dtos; break;
      default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_out = itos; break;
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_d2l: tos_out = ltos; break;
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_d2f: tos_out = ftos; break;
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_l2d: // fall through
      case Bytecodes::_f2d: tos_out = dtos; break;
      default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ movslq(rax, rax);
      break;
    case Bytecodes::_i2f:
      __ cvtsi2ssl(xmm0, rax);
      break;
    case Bytecodes::_i2d:
      __ cvtsi2sdl(xmm0, rax);
      break;
    case Bytecodes::_i2b:
      __ movsbl(rax, rax);
      break;
    case Bytecodes::_i2c:
      __ movzwl(rax, rax);
      break;
    case Bytecodes::_i2s:
      __ movswl(rax, rax);
      break;
    case Bytecodes::_l2i:
      __ movl(rax, rax);
      break;
    case Bytecodes::_l2f:
      __ cvtsi2ssq(xmm0, rax);
      break;
    case Bytecodes::_l2d:
      __ cvtsi2sdq(xmm0, rax);
      break;
    case Bytecodes::_f2i:
    {
      Label L;
      __ cvttss2sil(rax, xmm0);
      __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
      __ jcc(Assembler::notEqual, L);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      __ bind(L);
    }
    break;
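    // Note: cvttss2si writes the "integer indefinite" value 0x80000000 when
    // its input is NaN or out of range, so comparing against that value is
    // how the slow path (SharedRuntime::f2i, which implements Java's
    // saturating conversion) is detected above. The _f2l/_d2i/_d2l cases
    // below use the same trick, with is_nan holding the 64-bit indefinite
    // value.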
1843 case Bytecodes::_f2l:
1844 {
1845 Label L;
1846 __ cvttss2siq(rax, xmm0);
1847 // NaN or overflow/underflow?
1848 __ cmp64(rax, ExternalAddress((address) &is_nan));
1849 __ jcc(Assembler::notEqual, L);
1850 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1851 __ bind(L);
1852 }
1853 break;
1854 case Bytecodes::_f2d:
1855 __ cvtss2sd(xmm0, xmm0);
1856 break;
1857 case Bytecodes::_d2i:
1858 {
1859 Label L;
1860 __ cvttsd2sil(rax, xmm0);
1861 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1862 __ jcc(Assembler::notEqual, L);
1863 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1864 __ bind(L);
1865 }
1866 break;
1867 case Bytecodes::_d2l:
1868 {
1869 Label L;
1870 __ cvttsd2siq(rax, xmm0);
1871 // NaN or overflow/underflow?
1872 __ cmp64(rax, ExternalAddress((address) &is_nan));
1873 __ jcc(Assembler::notEqual, L);
1874 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1875 __ bind(L);
1876 }
1877 break;
1878 case Bytecodes::_d2f:
1879 __ cvtsd2ss(xmm0, xmm0);
1880 break;
1881 default:
1882 ShouldNotReachHere();
1883 }
1884 #else
1885 // Checking
1886 #ifdef ASSERT
1887 { TosState tos_in = ilgl;
1888 TosState tos_out = ilgl;
1889 switch (bytecode()) {
1890 case Bytecodes::_i2l: // fall through
1891 case Bytecodes::_i2f: // fall through
1892 case Bytecodes::_i2d: // fall through
1893 case Bytecodes::_i2b: // fall through
1894 case Bytecodes::_i2c: // fall through
1895 case Bytecodes::_i2s: tos_in = itos; break;
1896 case Bytecodes::_l2i: // fall through
1897 case Bytecodes::_l2f: // fall through
1898 case Bytecodes::_l2d: tos_in = ltos; break;
1899 case Bytecodes::_f2i: // fall through
1900 case Bytecodes::_f2l: // fall through
1901 case Bytecodes::_f2d: tos_in = ftos; break;
1902 case Bytecodes::_d2i: // fall through
1903 case Bytecodes::_d2l: // fall through
1904 case Bytecodes::_d2f: tos_in = dtos; break;
1905 default : ShouldNotReachHere();
1906 }
1907 switch (bytecode()) {
1908 case Bytecodes::_l2i: // fall through
1909 case Bytecodes::_f2i: // fall through
1910 case Bytecodes::_d2i: // fall through
1911 case Bytecodes::_i2b: // fall through
1912 case Bytecodes::_i2c: // fall through
1913 case Bytecodes::_i2s: tos_out = itos; break;
1914 case Bytecodes::_i2l: // fall through
1915 case Bytecodes::_f2l: // fall through
1916 case Bytecodes::_d2l: tos_out = ltos; break;
1917 case Bytecodes::_i2f: // fall through
1918 case Bytecodes::_l2f: // fall through
1919 case Bytecodes::_d2f: tos_out = ftos; break;
1920 case Bytecodes::_i2d: // fall through
1921 case Bytecodes::_l2d: // fall through
1922 case Bytecodes::_f2d: tos_out = dtos; break;
1923 default : ShouldNotReachHere();
1924 }
1925 transition(tos_in, tos_out);
1926 }
1927 #endif // ASSERT
1928
1929 // Conversion
1930 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1931 switch (bytecode()) {
1932 case Bytecodes::_i2l:
1933 __ extend_sign(rdx, rax);
1934 break;
1935 case Bytecodes::_i2f:
1936 if (UseSSE >= 1) {
1937 __ cvtsi2ssl(xmm0, rax);
1938 } else {
1939 __ push(rax); // store int on tos
1940 __ fild_s(at_rsp()); // load int to ST0
1941 __ f2ieee(); // truncate to float size
1942 __ pop(rcx); // adjust rsp
1943 }
1944 break;
1945 case Bytecodes::_i2d:
1946 if (UseSSE >= 2) {
1947 __ cvtsi2sdl(xmm0, rax);
1948 } else {
1949 __ push(rax); // add one slot for d2ieee()
1950 __ push(rax); // store int on tos
1951 __ fild_s(at_rsp()); // load int to ST0
1952 __ d2ieee(); // truncate to double size
1953 __ pop(rcx); // adjust rsp
1954 __ pop(rcx);
1955 }
1956 break;
1957 case Bytecodes::_i2b:
1958 __ shll(rax, 24); // truncate upper 24 bits
1959 __ sarl(rax, 24); // and sign-extend byte
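      // e.g. (illustrative): rax = 0x000001FF, shll(24) gives 0xFF000000 and
      // sarl(24) gives 0xFFFFFFFF, i.e. the sign-extended byte value -1.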
      LP64_ONLY(__ movsbl(rax, rax));
      break;
    case Bytecodes::_i2c:
      __ andl(rax, 0xFFFF);    // truncate upper 16 bits
      LP64_ONLY(__ movzwl(rax, rax));
      break;
    case Bytecodes::_i2s:
      __ shll(rax, 16);        // truncate upper 16 bits
      __ sarl(rax, 16);        // and sign-extend short
      LP64_ONLY(__ movswl(rax, rax));
      break;
    case Bytecodes::_l2i:
      /* nothing to do */
      break;
    case Bytecodes::_l2f:
      // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
      // 64-bit long values to floats. On 32-bit platforms it is not possible
      // to use that instruction with 64-bit operands, therefore the FPU is
      // used to perform the conversion.
      __ push(rdx);            // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());     // load long to ST0
      __ f2ieee();             // truncate to float size
      __ pop(rcx);             // adjust rsp
      __ pop(rcx);
      if (UseSSE >= 1) {
        __ push_f();
        __ pop_f(xmm0);
      }
      break;
    case Bytecodes::_l2d:
      // On 32-bit platforms the FPU is used for the conversion because it is
      // not possible to use the cvtsi2sdq instruction with 64-bit operands.
      __ push(rdx);            // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());     // load long to ST0
      __ d2ieee();             // truncate to double size
      __ pop(rcx);             // adjust rsp
      __ pop(rcx);
      if (UseSSE >= 2) {
        __ push_d();
        __ pop_d(xmm0);
      }
      break;
    case Bytecodes::_f2i:
      // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
      // as it returns 0 for any NaN.
      if (UseSSE >= 1) {
        __ push_f(xmm0);
      } else {
        __ push(rcx);          // reserve space for argument
        __ fstp_s(at_rsp());   // pass float argument on stack
      }
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      break;
    case Bytecodes::_f2l:
      // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
      // as it returns 0 for any NaN.
      if (UseSSE >= 1) {
        __ push_f(xmm0);
      } else {
        __ push(rcx);          // reserve space for argument
        __ fstp_s(at_rsp());   // pass float argument on stack
      }
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      break;
    case Bytecodes::_f2d:
      if (UseSSE < 1) {
        /* nothing to do */
      } else if (UseSSE == 1) {
        __ push_f(xmm0);
        __ pop_f();
      } else { // UseSSE >= 2
        __ cvtss2sd(xmm0, xmm0);
      }
      break;
    case Bytecodes::_d2i:
      if (UseSSE >= 2) {
        __ push_d(xmm0);
      } else {
        __ push(rcx);          // reserve space for argument
        __ push(rcx);
        __ fstp_d(at_rsp());   // pass double argument on stack
      }
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
      break;
    case Bytecodes::_d2l:
      if (UseSSE >= 2) {
        __ push_d(xmm0);
      } else {
        __ push(rcx);          // reserve space for argument
        __ push(rcx);
        __ fstp_d(at_rsp());   // pass double argument on stack
      }
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
      break;
    case Bytecodes::_d2f:
      if (UseSSE <= 1) {
        __ push(rcx);          // reserve space for f2ieee()
        __ f2ieee();           // truncate to float size
        __ pop(rcx);           // adjust rsp
        if (UseSSE == 1) {
          // The cvtsd2ss instruction is not available if UseSSE==1, therefore
          // the conversion is performed using the FPU in this case.
          __ push_f();
          __ pop_f(xmm0);
        }
      } else { // UseSSE >= 2
        __ cvtsd2ss(xmm0, xmm0);
      }
      break;
    default:
      ShouldNotReachHere();
  }
#endif
}

void TemplateTable::lcmp() {
  transition(ltos, itos);
#ifdef _LP64
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
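  // rax now holds -1, 0 or 1: e.g. when rdx == rax the setb(notEqual) above
  // stores 0, while rdx > rax stores 1 (the less-than case exited earlier).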
  __ bind(done);
#else

  // y = rdx:rax
  __ pop_l(rbx, rcx);             // get x = rcx:rbx
  __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
  __ mov(rax, rcx);
#endif
}

void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  if ((is_float && UseSSE >= 1) ||
      (!is_float && UseSSE >= 2)) {
    Label done;
    if (is_float) {
      // XXX get rid of pop here, use ... reg, mem32
      __ pop_f(xmm1);
      __ ucomiss(xmm1, xmm0);
    } else {
      // XXX get rid of pop here, use ... reg, mem64
      __ pop_d(xmm1);
      __ ucomisd(xmm1, xmm0);
    }
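    // ucomiss/ucomisd set the parity flag when the compare is unordered
    // (a NaN operand), so the parity jumps below select the bytecode's NaN
    // result: -1 for fcmpl/dcmpl (unordered_result < 0), +1 for fcmpg/dcmpg.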
    if (unordered_result < 0) {
      __ movl(rax, -1);
      __ jccb(Assembler::parity, done);
      __ jccb(Assembler::below, done);
      __ setb(Assembler::notEqual, rdx);
      __ movzbl(rax, rdx);
    } else {
      __ movl(rax, 1);
      __ jccb(Assembler::parity, done);
      __ jccb(Assembler::above, done);
      __ movl(rax, 0);
      __ jccb(Assembler::equal, done);
      __ decrementl(rax);
    }
    __ bind(done);
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    if (is_float) {
      __ fld_s(at_rsp());
    } else {
      __ fld_d(at_rsp());
      __ pop(rdx);
    }
    __ pop(rcx);
    __ fcmp2int(rax, unordered_result < 0);
#endif // _LP64
  }
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // Load up edx with the branch displacement
  if (is_wide) {
    __ movl(rdx, at_bcp(1));
  } else {
    __ load_signed_short(rdx, at_bcp(1));
  }
  __ bswapl(rdx);

  if (!is_wide) {
    __ sarl(rdx, 16);
  }
  LP64_ONLY(__ movl2ptr(rdx, rdx));
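  // On 64-bit the 32-bit displacement must be sign-extended to pointer width
  // before it can be added to rbcp below.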

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(ConstMethod::codes_offset())));
    __ subptr(rax, Address(rcx, Method::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(rbcp, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos, true);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(rbcp, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // rbx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);             // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    // check if MethodCounters exists
    Label has_counters;
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::notZero, has_counters);
    __ push(rdx);
    __ push(rcx);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
               rcx);
    __ pop(rcx);
    __ pop(rdx);
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, dispatch);
    __ bind(has_counters);

    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
        const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
                                   UseOnStackReplacement ? &backedge_counter_overflow : NULL);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in MethodCounters*
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero,
                                 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
    } else { // not TieredCompilation
      // increment counter
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);        // store counter

      __ movl(rax, Address(rcx, inv_offset));       // load invocation counter

      __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
      __ addl(rax, Address(rcx, be_offset));        // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against rbx which is the MDO taken count
          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::below, dispatch);
          // When ProfileInterpreter is on, the backedge_count comes
          // from the MethodData*, whose value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against rax, which is the sum of the
          // counters
          __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(rbcp, 0));

  // continue with the bytecode @ target
  // rax: return bci for jsr's, unused otherwise
  // rbx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos, true);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, rbcp); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);                        // test result
      __ jcc(Assembler::zero, dispatch);           // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
      __ jcc(Assembler::notEqual, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // In preparation of invoking it, first we must migrate the locals
      // and monitors from off the interpreter frame on the stack.
      // Ensure to save the osr nmethod over the migration call,
      // it will be preserved in rbx.
      __ mov(rbx, rax);

      NOT_LP64(__ get_thread(rcx));

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // rax is OSR buffer, move it to expected parameter location
      LP64_ONLY(__ mov(j_rarg0, rax));
      NOT_LP64(__ mov(rcx, rax));
      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr   = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
      const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();                                // remove frame anchor
      __ pop(retaddr);                           // get return address
      __ mov(rsp, sender_sp);                    // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));
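      // e.g. (illustrative): with StackAlignmentInBytes == 16, an rsp of
      // 0x...38 is rounded down to 0x...30 by the andptr above.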

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
    }
  }
}

void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpoop(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
  NOT_LP64(__ movptr(rbx, iaddress(rbx)));
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rbcp, Address(rax, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
                       ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rbcp, Address(rax, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, true);
}

void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);

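  // tableswitch operand layout in the bytecode stream (after padding to a
  // 4-byte boundary): default offset, lo, hi, then (hi - lo + 1) jump
  // offsets, all big-endian; hence lo is loaded at +BytesPerInt, hi at
  // +2*BytesPerInt, and the jump table starts at +3*BytesPerInt below.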
  // align r13/rsi
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  LP64_ONLY(__ movl2ptr(rdx, rdx));
  __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
  __ addptr(rbcp, rdx);
  __ dispatch_only(vtos, true);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andptr(rbx, -BytesPerInt);
  // set counter
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
  __ jcc(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
  __ addptr(rbcp, rdx);
  __ dispatch_only(vtos, true);
}

void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }
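  // Each LookupswitchPair in the stream is two big-endian 32-bit words, a
  // match value followed by a jump offset, which is why the loads below
  // scale the index by Address::times_8 and fetch the offset at +BytesPerInt.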

  // Register allocation
  const Register key   = rax; // already set (tosca)
  const Register array = rbx;
  const Register i     = rcx;
  const Register j     = rdx;
  const Register h     = rdi;
  const Register temp  = rsi;

  // Find array start
  NOT_LP64(__ save_bcp());

  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                          // get rid of this
                                          // instruction (change
                                          // offsets below)
  __ andptr(array, -BytesPerInt);

  // Initialize i & j
  __ xorl(i, i);                            // i = 0;
  __ movl(j, Address(array, -BytesPerInt)); // j = length(array);

  // Convert j into native byteordering
  __ bswapl(j);

  // And start
  Label entry;
  __ jmp(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
    __ sarl(h, 1);                               // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ movl(temp, Address(array, h, Address::times_8));
    __ bswapl(temp);
    __ cmpl(key, temp);
    // j = h if (key <  array[h].fast_match())
    __ cmov32(Assembler::less, j, h);
    // i = h if (key >= array[h].fast_match())
    __ cmov32(Assembler::greaterEqual, i, h);
    // while (i+1 < j)
    __ bind(entry);
    __ leal(h, Address(i, 1)); // i+1
    __ cmpl(h, j);             // i+1 < j
    __ jcc(Assembler::less, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ movl(temp, Address(array, i, Address::times_8));
  __ bswapl(temp);
  __ cmpl(key, temp);
  __ jcc(Assembler::notEqual, default_case);

  // entry found -> j = offset
  __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
  __ profile_switch_case(i, key, array);
  __ bswapl(j);
  LP64_ONLY(__ movslq(j, j));

  NOT_LP64(__ restore_bcp());
  NOT_LP64(__ restore_locals()); // restore rdi

  __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
  __ addptr(rbcp, j);
  __ dispatch_only(vtos, true);

  // default case -> j = default offset
  __ bind(default_case);
  __ profile_switch_default(i);
  __ movl(j, Address(array, -2 * BytesPerInt));
  __ bswapl(j);
  LP64_ONLY(__ movslq(j, j));

  NOT_LP64(__ restore_bcp());
  NOT_LP64(__ restore_locals());

  __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
  __ addptr(rbcp, j);
  __ dispatch_only(vtos, true);
}

void TemplateTable::_return(TosState state) {
  transition(state, state);

  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
    __ movptr(robj, aaddress(0));
    Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
    __ load_klass(rdi, robj, tmp_load_klass);
    __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
    __ testl(rdi, JVM_ACC_HAS_FINALIZER);
    Label skip_register_finalizer;
    __ jcc(Assembler::zero, skip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);

    __ bind(skip_register_finalizer);
  }

  if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
#ifdef _LP64
    __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
#else
    const Register thread = rdi;
    __ get_thread(thread);
    __ testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
#endif
    __ jcc(Assembler::zero, no_safepoint);
    __ push(state);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::at_safepoint));
    __ pop(state);
    __ bind(no_safepoint);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
  if (state == itos) {
    __ narrow(rax);
  }
  __ remove_activation(state, rbcp);

  __ jmp(rbcp);
}

// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
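//
// Illustrative example (not from this file): with volatile fields x and y,
// two threads running "x = 1; r1 = y" and "y = 1; r2 = x" must never
// observe r1 == r2 == 0 under the JMM. Without a StoreLoad barrier after
// the volatile stores, a store buffer could let each thread's load pass the
// other thread's store and produce exactly that forbidden result.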

void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper function to insert a memory barrier with the given ordering
  // constraint.
  __ membar(order_constraint);
}

void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register cache,
                                            Register index,
                                            size_t index_size) {
  const Register temp = rbx;
  assert_different_registers(cache, index, temp);

  Label L_clinit_barrier_slow;
  Label resolved;

  Bytecodes::Code code = bytecode();
  switch (code) {
    case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
    case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
    default: break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ get_cache_and_index_and_bytecode_at_bcp(cache, index, temp, byte_no, 1, index_size);
  __ cmpl(temp, code);  // have we resolved this bytecode?
  __ jcc(Assembler::equal, resolved);

  // resolve first time through
  // Class initialization barrier slow path lands here as well.
  __ bind(L_clinit_barrier_slow);
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ movl(temp, code);
  __ call_VM(noreg, entry, temp);
  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  __ bind(resolved);

  // Class initialization barrier for static methods
  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
    const Register method = temp;
    const Register klass  = temp;
    const Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
    assert(thread != noreg, "x86_32 not supported");

    __ load_resolved_method_at_index(byte_no, method, cache, index);
    __ load_method_holder(klass, method);
    __ clinit_barrier(klass, thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow);
  }
}

// The cache and index registers must be set before call
void TemplateTable::load_field_cp_cache_entry(Register obj,
                                              Register cache,
                                              Register index,
                                              Register off,
                                              Register flags,
                                              bool is_static = false) {
  assert_different_registers(cache, index, flags, off);

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Field offset
  __ movptr(off, Address(cache, index, Address::times_ptr,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::f2_offset())));
  // Flags
  __ movl(flags, Address(cache, index, Address::times_ptr,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::flags_offset())));

  // klass overwrite register
  if (is_static) {
    __ movptr(obj, Address(cache, index, Address::times_ptr,
                           in_bytes(cp_base_offset +
                                    ConstantPoolCacheEntry::f1_offset())));
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movptr(obj, Address(obj, mirror_offset));
    __ resolve_oop_handle(obj);
  }
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal, /*unused*/
                                               bool is_invokedynamic) {
  // setup registers
  const Register cache = rcx;
  const Register index = rdx;
  assert_different_registers(method, flags);
  assert_different_registers(method, cache, index);
  assert_different_registers(itable_index, flags);
  assert_different_registers(itable_index, cache, index);
  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
  resolve_cache_and_index(byte_no, cache, index, index_size);
  __ load_resolved_method_at_index(byte_no, method, cache, index);

  if (itable_index != noreg) {
    // pick up itable or appendix index from f2 also:
    __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
  }
  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
}

// The registers cache and index are expected to be set before call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache,
                                            Register index,
                                            bool is_static,
                                            bool has_tos) {
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we take
    // the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, rax);
    __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rax, rax);
    __ jcc(Assembler::zero, L1);

    // cache entry pointer
    __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
    __ shll(index, LogBytesPerWord);
    __ addptr(cache, index);
    if (is_static) {
      __ xorptr(rax, rax);      // NULL object reference
    } else {
      __ pop(atos);             // Get the object
      __ verify_oop(rax);
      __ push(atos);            // Restore stack state
    }
    // rax: object pointer or NULL
    // cache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               rax, cache);
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ bind(L1);
  }
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache = rcx;
  const Register index = rdx;
  const Register obj   = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  const Register off   = rbx;
  const Register flags = rax;
  const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them

  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  jvmti_post_field_access(cache, index, is_static, false);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  if (!is_static) pop_and_check_object(obj);

  const Address field(obj, off, Address::times_1, 0*wordSize);

  Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;

  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask edx after the above shift
  assert(btos == 0, "change code, btos != 0");

  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
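  // flags now holds only the field's tos state; since btos == 0, the byte
  // case below falls through without an explicit compare.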

  __ jcc(Assembler::notZero, notByte);
  // btos
  __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
  __ push(btos);
  // Rewrite bytecode to be faster
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notByte);
  __ cmpl(flags, ztos);
  __ jcc(Assembler::notEqual, notBool);

  // ztos (same code as btos)
  __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
  __ push(ztos);
  // Rewrite bytecode to be faster
  if (!is_static && rc == may_rewrite) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notBool);
  __ cmpl(flags, atos);
  __ jcc(Assembler::notEqual, notObj);
  // atos
  do_oop_load(_masm, field, rax);
  __ push(atos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notObj);
  __ cmpl(flags, itos);
  __ jcc(Assembler::notEqual, notInt);
  // itos
  __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
  __ push(itos);
  // Rewrite bytecode to be faster
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notInt);
  __ cmpl(flags, ctos);
  __ jcc(Assembler::notEqual, notChar);
  // ctos
  __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
  __ push(ctos);
  // Rewrite bytecode to be faster
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notChar);
  __ cmpl(flags, stos);
  __ jcc(Assembler::notEqual, notShort);
  // stos
  __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
  __ push(stos);
  // Rewrite bytecode to be faster
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notShort);
  __ cmpl(flags, ltos);
  __ jcc(Assembler::notEqual, notLong);
  // ltos
  // Generate code as if volatile (x86_32). There just aren't enough registers to
  // save that information and this code is faster than the test.
  __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
  __ push(ltos);
  // Rewrite bytecode to be faster
  LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
  __ jmp(Done);

  __ bind(notLong);
  __ cmpl(flags, ftos);
  __ jcc(Assembler::notEqual, notFloat);
  // ftos

  __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
  __ push(ftos);
  // Rewrite bytecode to be faster
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  Label notDouble;
  __ cmpl(flags, dtos);
  __ jcc(Assembler::notEqual, notDouble);
#endif
  // dtos
  // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
  __ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg, noreg);
  __ push(dtos);
  // Rewrite bytecode to be faster
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
  }
#ifdef ASSERT
  __ jmp(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);
  // [jk] not needed currently
  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
  //                                              Assembler::LoadStore));
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The registers cache and index are expected to be set before call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {

  const Register robj = LP64_ONLY(c_rarg2)   NOT_LP64(rax);
  const Register RBX  = LP64_ONLY(c_rarg1)   NOT_LP64(rbx);
  const Register RCX  = LP64_ONLY(c_rarg3)   NOT_LP64(rcx);
  const Register RDX  = LP64_ONLY(rscratch1) NOT_LP64(rdx);

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, rax);
    __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ testl(rax, rax);
    __ jcc(Assembler::zero, L1);

    __ get_cache_and_index_at_bcp(robj, RDX, 1);


    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ xorl(RBX, RBX);

    } else {
      // Life is harder. The stack holds the value on top, followed by
      // the object. We don't know the size of the value, though; it
      // could be one or two words depending on its type. As a result,
      // we must find the type to determine where the object is.
#ifndef _LP64
      Label two_word, valsize_known;
#endif
      __ movl(RCX, Address(robj, RDX,
                           Address::times_ptr,
                           in_bytes(cp_base_offset +
                                    ConstantPoolCacheEntry::flags_offset())));
      NOT_LP64(__ mov(rbx, rsp));
      __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);

      // Make sure we don't need to mask rcx after the above shift
      ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
      __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
      __ cmpl(c_rarg3, ltos);
      __ cmovptr(Assembler::equal,
                 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
      __ cmpl(c_rarg3, dtos);
      __ cmovptr(Assembler::equal,
                 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
#else
      __ cmpl(rcx, ltos);
      __ jccb(Assembler::equal, two_word);
      __ cmpl(rcx, dtos);
      __ jccb(Assembler::equal, two_word);
      __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
      __ jmpb(valsize_known);

      __ bind(two_word);
      __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue

      __ bind(valsize_known);
      // setup object pointer
      __ movptr(rbx, Address(rbx, 0));
#endif
    }
    // cache entry pointer
    __ addptr(robj, in_bytes(cp_base_offset));
    __ shll(RDX, LogBytesPerWord);
    __ addptr(robj, RDX);
    // object (tos)
    __ mov(RCX, rsp);
    // c_rarg1: object pointer set up above (NULL if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               RBX, robj, RCX);
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ bind(L1);
  }
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache = rcx;
  const Register index = rdx;
  const Register obj   = rcx;
  const Register off   = rbx;
  const Register flags = rax;

  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  jvmti_post_field_mod(cache, index, is_static);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  // [jk] not needed currently
  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
  //                                              Assembler::StoreStore));

  Label notVolatile, Done;
  __ movl(rdx, flags);
  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  __ andl(rdx, 0x1);

  // Check for volatile store
  __ testl(rdx, rdx);
  __ jcc(Assembler::zero, notVolatile);
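  // The store sequence is emitted twice, below and after the notVolatile
  // label, so that only the volatile path pays for the barrier that follows.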

  putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
  volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                               Assembler::StoreStore));
  __ jmp(Done);
  __ bind(notVolatile);

  putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);

  __ bind(Done);
}

void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
                                              Register obj, Register off, Register flags) {

  // field addresses
  const Address field(obj, off, Address::times_1, 0*wordSize);
  NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)

  Label notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj;
  Label Done;

  const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);

  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);

  assert(btos == 0, "change code, btos != 0");
  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
  __ jcc(Assembler::notZero, notByte);

  // btos
  {
    __ pop(btos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notByte);
  __ cmpl(flags, ztos);
  __ jcc(Assembler::notEqual, notBool);

  // ztos
  {
    __ pop(ztos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notBool);
  __ cmpl(flags, atos);
  __ jcc(Assembler::notEqual, notObj);

  // atos
  {
    __ pop(atos);
    if (!is_static) pop_and_check_object(obj);
    // Store into the field
    do_oop_store(_masm, field, rax);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notObj);
  __ cmpl(flags, itos);
  __ jcc(Assembler::notEqual, notInt);

  // itos
  {
    __ pop(itos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notInt);
  __ cmpl(flags, ctos);
  __ jcc(Assembler::notEqual, notChar);

  // ctos
  {
    __ pop(ctos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notChar);
  __ cmpl(flags, stos);
  __ jcc(Assembler::notEqual, notShort);

  // stos
  {
    __ pop(stos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notShort);
  __ cmpl(flags, ltos);
  __ jcc(Assembler::notEqual, notLong);

  // ltos
  {
    __ pop(ltos);
    if (!is_static) pop_and_check_object(obj);
    // MO_RELAXED: generate atomic store for the case of volatile field (important for x86_32)
    __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos*/, noreg, noreg);
#ifdef _LP64
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
    }
#endif // _LP64
    __ jmp(Done);
  }

  __ bind(notLong);
  __ cmpl(flags, ftos);
  __ jcc(Assembler::notEqual, notFloat);

  // ftos
  {
    __ pop(ftos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notFloat);
#ifdef ASSERT
  Label notDouble;
  __ cmpl(flags, dtos);
  __ jcc(Assembler::notEqual, notDouble);
#endif

  // dtos
  {
    __ pop(dtos);
    if (!is_static) pop_and_check_object(obj);
    // MO_RELAXED: for the case of volatile field, in fact it adds no extra work for the underlying implementation
    __ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
    }
  }

#ifdef ASSERT
  __ jmp(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::jvmti_post_fast_field_mod() {

  const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L2;
    __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ testl(scratch, scratch);
    __ jcc(Assembler::zero, L2);
    __ pop_ptr(rbx);                  // copy the object pointer from tos
    __ verify_oop(rbx);
    __ push_ptr(rbx);                 // put the object pointer back on tos
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {             // load values into the jvalue object
    case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(rax); break;
    case Bytecodes::_fast_dputfield: __ push(dtos); break;
    case Bytecodes::_fast_fputfield: __ push(ftos); break;
    case Bytecodes::_fast_lputfield: __ push_l(rax); break;

    default:
      ShouldNotReachHere();
    }
    __ mov(scratch, rsp);             // points to jvalue on the stack
    // access constant pool cache entry
    LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
    NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
    __ verify_oop(rbx);
    // rbx: object pointer copied above
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
    NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));

    switch (bytecode()) {             // restore tos values
    case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
    case Bytecodes::_fast_dputfield: __ pop(dtos); break;
    case Bytecodes::_fast_fputfield: __ pop(ftos); break;
    case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
    default: break;
    }
    __ bind(L2);
  }
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);

  ByteSize base = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod();

  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rbx, 1);

3406 // test for volatile with rdx but rdx is tos register for lputfield.
3407 __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3408 in_bytes(base +
3409 ConstantPoolCacheEntry::flags_offset())));
3410
3411 // replace index with field offset from cache entry
3412 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3413 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3414
3415 // [jk] not needed currently
3416 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3417 // Assembler::StoreStore));
3418
3419 Label notVolatile, Done;
3420 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3421 __ andl(rdx, 0x1);
3422
3423 // Get object from stack
3424 pop_and_check_object(rcx);
3425
3426 // field address
3427 const Address field(rcx, rbx, Address::times_1);
3428
3429 // Check for volatile store
3430 __ testl(rdx, rdx);
3431 __ jcc(Assembler::zero, notVolatile);
3432
3433 fast_storefield_helper(field, rax);
3434 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3435 Assembler::StoreStore));
3436 __ jmp(Done);
3437 __ bind(notVolatile);
3438
3439 fast_storefield_helper(field, rax);
3440
3441 __ bind(Done);
3442 }

void TemplateTable::fast_storefield_helper(Address field, Register rax) {

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, field, rax);
    break;
  case Bytecodes::_fast_lputfield:
#ifdef _LP64
    __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
#else
    __ stop("should not be rewritten");
#endif
    break;
  case Bytecodes::_fast_iputfield:
    __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
    break;
  case Bytecodes::_fast_zputfield:
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
    break;
  case Bytecodes::_fast_bputfield:
    __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
    break;
  case Bytecodes::_fast_sputfield:
    __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
    break;
  case Bytecodes::_fast_cputfield:
    __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
    break;
  case Bytecodes::_fast_fputfield:
    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
    break;
  case Bytecodes::_fast_dputfield:
    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }
}

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  // Do the JVMTI work here to avoid disturbing the register state below
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, L1);
    // access constant pool cache entry
    LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
    NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
    __ verify_oop(rax);
    __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
    LP64_ONLY(__ mov(c_rarg1, rax));
    // c_rarg1: object pointer copied above
    // c_rarg2: cache entry pointer
    LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
    NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
    __ pop_ptr(rax);  // restore object pointer
    __ bind(L1);
  }

  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rbx, 1);
  // replace index with field offset from cache entry
  // [jk] not needed currently
  // __ movl(rdx, Address(rcx, rbx, Address::times_8,
  //                      in_bytes(ConstantPoolCache::base_offset() +
  //                               ConstantPoolCacheEntry::flags_offset())));
  // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  // __ andl(rdx, 0x1);
  //
  __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
                         in_bytes(ConstantPoolCache::base_offset() +
                                  ConstantPoolCacheEntry::f2_offset())));

  // rax: object
  __ verify_oop(rax);
  __ null_check(rax);
  Address field(rax, rbx, Address::times_1);

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_agetfield:
    do_oop_load(_masm, field, rax);
    __ verify_oop(rax);
    break;
  case Bytecodes::_fast_lgetfield:
#ifdef _LP64
    __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
#else
    __ stop("should not be rewritten");
#endif
    break;
  case Bytecodes::_fast_igetfield:
    __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
    break;
  case Bytecodes::_fast_bgetfield:
    __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
    break;
  case Bytecodes::_fast_sgetfield:
    __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
    break;
  case Bytecodes::_fast_cgetfield:
    __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
    break;
  case Bytecodes::_fast_fgetfield:
    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
    break;
  case Bytecodes::_fast_dgetfield:
    __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }
  // [jk] not needed currently
  // Label notVolatile;
  // __ testl(rdx, rdx);
  // __ jcc(Assembler::zero, notVolatile);
  // __ membar(Assembler::LoadLoad);
  // __ bind(notVolatile);
}
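
// Editorial sketch (not generated code): how control gets here. Once a
// getfield has been resolved, the interpreter rewrites it in place to the
// _fast_*getfield bytecode matching the field type, e.g.
//
//   int v = p.count;      // getfield #n  ->  _fast_igetfield #n
//   Object o = p.next;    // getfield #m  ->  _fast_agetfield #m
//
// which is why the code above can load the field offset straight from the
// cache entry's f2 slot with no resolution check.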

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  // get receiver
  __ movptr(rax, aaddress(0));
  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rdx, 2);
  __ movptr(rbx,
            Address(rcx, rdx, Address::times_ptr,
                    in_bytes(ConstantPoolCache::base_offset() +
                             ConstantPoolCacheEntry::f2_offset())));
  // make sure exception is reported in correct bcp range (getfield is
  // next instruction)
  __ increment(rbcp);
  __ null_check(rax);
  const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
  switch (state) {
  case itos:
    __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
    break;
  case atos:
    do_oop_load(_masm, field, rax);
    __ verify_oop(rax);
    break;
  case ftos:
    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
    break;
  default:
    ShouldNotReachHere();
  }

  // [jk] not needed currently
  // Label notVolatile;
  // __ movl(rdx, Address(rcx, rdx, Address::times_8,
  //                      in_bytes(ConstantPoolCache::base_offset() +
  //                               ConstantPoolCacheEntry::flags_offset())));
  // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  // __ testl(rdx, 0x1);
  // __ jcc(Assembler::zero, notVolatile);
  // __ membar(Assembler::LoadLoad);
  // __ bind(notVolatile);

  __ decrement(rbcp);
}
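
// Editorial sketch (not generated code): fast_xaccess() implements the fused
// bytecodes _fast_iaccess_0, _fast_aaccess_0 and _fast_faccess_0, i.e. an
// aload_0 immediately followed by a fast getfield on 'this', as produced by
// accessor-style code such as
//
//   int getValue() { return this.value; }
//
// The cache index is read at bcp offset 2 (past the embedded getfield
// opcode), and rbcp is temporarily incremented so that an implicit
// NullPointerException is reported against the getfield half of the pair.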

//-----------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver = (recv != noreg);
  const bool save_flags    = (flags != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
  assert(flags == noreg || flags == rdx, "");
  assert(recv == noreg || recv == rcx, "");

  // setup registers & access constant pool cache
  if (recv == noreg)  recv  = rcx;
  if (flags == noreg) flags = rdx;
  assert_different_registers(method, index, recv, flags);

  // save 'interpreter return address'
  __ save_bcp();

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  // maybe push appendix to arguments (just before return address)
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
    __ jcc(Assembler::zero, L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push(rbx);
    __ mov(rbx, index);
    __ load_resolved_reference_at_index(index, rbx);
    __ pop(rbx);
    __ push(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  // Note: no return address pushed yet
  if (load_receiver) {
    __ movl(recv, flags);
    __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
    const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
    const int receiver_is_at_end      = -1;  // back off one slot to get receiver
    Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
    __ movptr(recv, recv_addr);
    __ verify_oop(recv);
  }

  if (save_flags) {
    __ movl(rbcp, flags);
  }

  // compute return type
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    ExternalAddress table(table_addr);
    LP64_ONLY(__ lea(rscratch1, table));
    LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr)));
    NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))));
  }

  // push return address
  __ push(flags);

  // Restore the flags value (stashed in rbcp above) and restore the bytecode
  // pointer (rsi on 32-bit, r13 on 64-bit) for later null checks.
  if (save_flags) {
    __ movl(flags, rbcp);
    __ restore_bcp();
  }
}
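
// Editorial sketch (see cpCache.hpp for the authoritative layout): the
// ConstantPoolCacheEntry consulted above roughly looks like
//
//   _f1    - Method*, Klass* or resolved-references index, depending on
//            the invoke kind
//   _f2    - Method* (vfinal) or vtable/itable index
//   _flags - [tos_state|...|has_appendix|is_vfinal|...|parameter_size]
//
// so shrl(flags, tos_state_shift) extracts the return type used to pick the
// return entry, and andl(recv, parameter_size_mask) extracts the argument
// slot count used to locate the receiver on the expression stack.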

void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags) {
  // Uses temporary registers rax, rdx
  assert_different_registers(index, recv, rax, rdx);
  assert(index == rbx, "");
  assert(recv == rcx, "");

  // Test for an invoke of a final method
  Label notFinal;
  __ movl(rax, flags);
  __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
  __ jcc(Assembler::zero, notFinal);

  const Register method = index;  // method must be rbx
  assert(method == rbx,
         "Method* must be rbx for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*

  // It's final, need a null check here!
  __ null_check(recv);

  // profile this call
  __ profile_final_call(rax);
  __ profile_arguments_type(rax, method, rbcp, true);

  __ jump_from_interpreted(method, rax);

  __ bind(notFinal);

  // get receiver klass
  __ null_check(recv, oopDesc::klass_offset_in_bytes());
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  __ load_klass(rax, recv, tmp_load_klass);

  // profile this call
  __ profile_virtual_call(rax, rlocals, rdx);
  // get target Method* & entry point
  __ lookup_virtual_method(rax, index, method);

  __ profile_arguments_type(rdx, method, rbcp, true);
  __ jump_from_interpreted(method, rdx);
}
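
// Editorial sketch (not generated code): the two paths above correspond to
// whether resolution could bind the target statically. For e.g.
//
//   final class Point { int x() { ... } }
//   ...
//   p.x();             // invokevirtual, but is_vfinal is set
//
// f2 holds the Method* directly and no vtable lookup is needed, whereas for
// an ordinary virtual call f2 holds a vtable index and
// lookup_virtual_method() fetches the target from the receiver's vtable.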

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  prepare_invoke(byte_no,
                 rbx,        // method or vtable index
                 noreg,      // unused itable index
                 rcx, rdx);  // recv, flags

  // rbx: index
  // rcx: receiver
  // rdx: flags

  invokevirtual_helper(rbx, rcx, rdx);
}

void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
                 rcx);  // get receiver also for null check
  __ verify_oop(rcx);
  __ null_check(rcx);
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, rbcp, false);
  __ jump_from_interpreted(rbx, rax);
}

void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx);  // get f1 Method*
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, rbcp, false);
  __ jump_from_interpreted(rbx, rax);
}


void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  __ stop("fast_invokevfinal not used on x86");
}


void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 Method*
                 rcx, rdx);  // recv, flags

  // rax: reference klass (from f1) if interface method
  // rbx: method (from f2)
  // rcx: receiver
  // rdx: flags

  // First check for Object case, then private interface method,
  // then regular interface method.

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See cpCache.cpp for details.
  Label notObjectMethod;
  __ movl(rlocals, rdx);
  __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
  __ jcc(Assembler::zero, notObjectMethod);
  invokevirtual_helper(rbx, rcx, rdx);
  // no return from above
  __ bind(notObjectMethod);

  Label no_such_interface;  // for receiver subtype check
  Register recvKlass;       // used for exception processing

  // Check for private method invocation - indicated by vfinal
  Label notVFinal;
  __ movl(rlocals, rdx);
  __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
  __ jcc(Assembler::zero, notVFinal);

  // Get receiver klass into rlocals - also a null check
  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  __ load_klass(rlocals, rcx, tmp_load_klass);

  Label subtype;
  __ check_klass_subtype(rlocals, rax, rbcp, subtype);
  // If we get here the typecheck failed
  recvKlass = rdx;
  __ mov(recvKlass, rlocals);  // shuffle receiver class for exception use
  __ jmp(no_such_interface);

  __ bind(subtype);

  // do the call - rbx is actually the method to call

  __ profile_final_call(rdx);
  __ profile_arguments_type(rdx, rbx, rbcp, true);

  __ jump_from_interpreted(rbx, rdx);
  // no return from above
  __ bind(notVFinal);

  // Get receiver klass into rdx - also a null check
  __ restore_locals();  // restore r14
  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
  __ load_klass(rdx, rcx, tmp_load_klass);

  Label no_such_method;

  // Preserve method for throw_AbstractMethodErrorVerbose.
  __ mov(rcx, rbx);
  // Receiver subtype check against REFC.
  // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rdx, rax, noreg,
                             // outputs: scan temp. reg, scan temp. reg
                             rbcp, rlocals,
                             no_such_interface,
                             /*return_method=*/false);

  // profile this call
  __ restore_bcp();  // rbcp was destroyed by receiver type check
  __ profile_virtual_call(rdx, rbcp, rlocals);

  // Get declaring interface class from method, and itable index
  __ load_method_holder(rax, rbx);
  __ movl(rbx, Address(rbx, Method::itable_index_offset()));
  __ subl(rbx, Method::itable_index_max);
  __ negl(rbx);

  // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
  __ mov(rlocals, rdx);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rlocals, rax, rbx,
                             // outputs: method, scan temp. reg
                             rbx, rbcp,
                             no_such_interface);

  // rbx: Method* to call
  // rcx: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  //       interpreter entry point and a conditional jump to it in case of a null
  //       method.
  __ testptr(rbx, rbx);
  __ jcc(Assembler::zero, no_such_method);

  __ profile_arguments_type(rdx, rbx, rbcp, true);

  // do the call
  // rcx: receiver
  // rbx: Method*
  __ jump_from_interpreted(rbx, rdx);
  __ should_not_reach_here();

  // exception handling code follows...
  // note: must restore interpreter registers to canonical
  //       state for exception handling to work correctly!

  __ bind(no_such_method);
  // throw exception
  __ pop(rbx);          // pop return address (pushed by prepare_invoke)
  __ restore_bcp();     // rbcp must be correct for exception handler (was destroyed)
  __ restore_locals();  // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
#ifdef _LP64
  recvKlass = c_rarg1;
  Register method = c_rarg2;
  if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
  if (method != rcx)    { __ movq(method, rcx); }
#else
  recvKlass = rdx;
  Register method = rcx;
#endif
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
             recvKlass, method);
  // The call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);
  // throw exception
  __ pop(rbx);          // pop return address (pushed by prepare_invoke)
  __ restore_bcp();     // rbcp must be correct for exception handler (was destroyed)
  __ restore_locals();  // make sure locals pointer is correct as well (was destroyed)
  // Pass arguments for generating a verbose error message.
  LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
             recvKlass, rax);
  // The call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
}
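
// Editorial sketch (not generated code): the three cases handled above, in
// Java terms:
//
//   List<String> l = ...;
//   l.get(0);        // plain interface call: itable lookup path
//   l.toString();    // java.lang.Object method via an interface receiver:
//                    //   is_forced_virtual, routed to invokevirtual_helper
//   // a private interface method (Java 9+) sets is_vfinal and only needs
//   // the receiver subtype check before calling f2's Method* directly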

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  const Register rbx_method = rbx;
  const Register rax_mtype  = rax;
  const Register rcx_recv   = rcx;
  const Register rdx_flags  = rdx;

  prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
  __ verify_method_ptr(rbx_method);
  __ verify_oop(rcx_recv);
  __ null_check(rcx_recv);

  // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
  // rbx: MH.invokeExact_MT method (from f2)

  // Note: rax_mtype is already pushed (if necessary) by prepare_invoke

  // FIXME: profile the LambdaForm also
  __ profile_final_call(rax);
  __ profile_arguments_type(rdx, rbx_method, rbcp, true);

  __ jump_from_interpreted(rbx_method, rdx);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register rbx_method   = rbx;
  const Register rax_callsite = rax;

  prepare_invoke(byte_no, rbx_method, rax_callsite);

  // rax: CallSite object (from cpool->resolved_references[f1])
  // rbx: MH.linkToCallSite method (from f2)

  // Note: rax_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rbcp);
  __ profile_arguments_type(rdx, rbx_method, rbcp, false);

  __ verify_oop(rax_callsite);

  __ jump_from_interpreted(rbx_method, rdx);
}
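
// Editorial sketch (not generated code): javac compiles lambdas (and, since
// JDK 9, string concatenation) to invokedynamic, e.g.
//
//   Runnable r = () -> {};   // invokedynamic, bootstrapped by
//                            // LambdaMetafactory.metafactory
//
// After linkage, the resolved CallSite appendix has been pushed as a
// trailing argument by prepare_invoke(), and rbx_method is the
// MH.linkToCallSite linker method that the interpreter jumps to above.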

//-----------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);
  __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
  Label slow_case;
  Label slow_case_no_pop;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields

  __ get_cpool_and_tags(rcx, rax);

  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the
  // order in which the constant pool is updated (see ConstantPool::klass_at_put)
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, slow_case_no_pop);

  // get InstanceKlass
  __ load_resolved_klass_at_index(rcx, rcx, rdx);
  __ push(rcx);  // save the klass pointer for initializing the header

  // make sure klass is fully initialized & doesn't have a finalizer
  __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
  __ jcc(Assembler::notEqual, slow_case);

  // get instance_size in InstanceKlass (scaled to a count of bytes)
  __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
  // test to see if it has a finalizer or is malformed in some way
  __ testl(rdx, Klass::_lh_instance_slow_path_bit);
  __ jcc(Assembler::notZero, slow_case);

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //  Else If inline contiguous allocations are enabled:
  //    Try to allocate in eden.
  //    If fails due to heap end, go to slow path.
  //
  //  If TLAB is enabled OR inline contiguous is enabled:
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to slow path.

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc();

  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
#ifndef _LP64
  if (UseTLAB || allow_shared_alloc) {
    __ get_thread(thread);
  }
#endif // _LP64

  if (UseTLAB) {
    __ tlab_allocate(thread, rax, rdx, 0, rcx, rbx, slow_case);
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ jmp(initialize_header);
    } else {
      // initialize both the header and fields
      __ jmp(initialize_object);
    }
  } else {
    // Allocation in the shared Eden, if allowed.
    //
    // rdx: instance size in bytes
    __ eden_allocate(thread, rax, rdx, 0, rbx, slow_case);
  }

  // If UseTLAB or allow_shared_alloc is true, the object was created above
  // and now needs to be initialized. Otherwise, fall through to the slow path.
  if (UseTLAB || allow_shared_alloc) {
    // The object is initialized before the header. If the object size is
    // zero, go directly to the header initialization.
    __ bind(initialize_object);
    __ decrement(rdx, sizeof(oopDesc));
    __ jcc(Assembler::zero, initialize_header);

    // Initialize topmost object field, divide rdx by 8, check if odd and
    // test if zero.
    __ xorl(rcx, rcx);              // use zero reg to clear memory (shorter code)
    __ shrl(rdx, LogBytesPerLong);  // divide by 2*oopSize and set carry flag if odd

#ifdef ASSERT
    // make sure rdx was a multiple of 8
    Label L;
    // Ignore partial flag stall after shrl() since it is debug VM
    __ jcc(Assembler::carryClear, L);
    __ stop("object size is not multiple of 2 - adjust this code");
    __ bind(L);
    // rdx must be > 0, no extra check needed here
#endif

    // initialize remaining object fields: rdx was a multiple of 8
    { Label loop;
      __ bind(loop);
      __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
      NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
      __ decrement(rdx);
      __ jcc(Assembler::notZero, loop);
    }

    // initialize object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      __ pop(rcx);  // get saved klass back in the register.
      __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rbx);
    } else {
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
                (intptr_t)markWord::prototype().value());  // header
      __ pop(rcx);  // get saved klass back in the register.
    }
#ifdef _LP64
    __ xorl(rsi, rsi);             // use zero reg to clear memory (shorter code)
    __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
#endif
    Register tmp_store_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
    __ store_klass(rax, rcx, tmp_store_klass);  // klass

    {
      SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
      // Trigger dtrace event for fastpath
      __ push(atos);
      __ call_VM_leaf(
           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
      __ pop(atos);
    }

    __ jmp(done);
  }

  // slow case
  __ bind(slow_case);
  __ pop(rcx);  // restore stack pointer to what it was when we came in.
  __ bind(slow_case_no_pop);

  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
  Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);

  __ get_constant_pool(rarg1);
  __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
  __ verify_oop(rax);

  // continue
  __ bind(done);
}
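
// Editorial sketch (simplified pseudocode, not the actual implementation):
// the TLAB fast path emitted by tlab_allocate() above amounts to
//
//   HeapWord* top = thread->tlab().top();
//   if (top + size <= thread->tlab().end()) {
//     thread->tlab().set_top(top + size);
//     obj = top;                       // ends up in rax
//   } else {
//     goto slow_case;                  // InterpreterRuntime::_new
//   }
//
// i.e. a bump-the-pointer allocation with no synchronization, since the
// TLAB is private to the allocating thread.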

void TemplateTable::newarray() {
  transition(itos, atos);
  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  __ load_unsigned_byte(rarg1, at_bcp(1));
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          rarg1, rax);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
  Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);

  __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
  __ get_constant_pool(rarg1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          rarg1, rarg2, rax);
}

void TemplateTable::arraylength() {
  transition(atos, itos);
  __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
  __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
}

void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax);  // object is in rax
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx);  // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);  // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  Array<u1>::base_offset_in_bytes()),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);
  __ push(atos);  // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));

  // vm_result_2 has metadata result
#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(rax, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(rax, r15_thread);
#endif

  __ pop_ptr(rdx);  // restore receiver
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rbx
  __ bind(quicked);
  __ mov(rdx, rax);  // Save object in rdx; rax needed for subtype check
  __ load_resolved_klass_at_index(rax, rcx, rbx);

  __ bind(resolved);
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  __ load_klass(rbx, rdx, tmp_load_klass);

  // Generate subtype check. Blows rcx, rdi. Object in rdx.
  // Superklass in rax. Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  __ push_ptr(rdx);
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(rax, rdx);  // Restore object from rdx

  // Collect counts on whether this check-cast sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null);  // same as 'done'
  }
  __ bind(done);
}
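
// Editorial sketch (not generated code): for a cast such as
//
//   String s = (String) obj;   // checkcast #n
//
// the first execution resolves the named class via quicken_io_cc() and the
// constant pool tag becomes JVM_CONSTANT_Class; later executions take the
// 'quicked' path above and go straight to the inline subtype check.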

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx);  // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);  // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  Array<u1>::base_offset_in_bytes()),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);

  __ push(atos);  // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result

#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(rax, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(rax, r15_thread);
#endif

  __ pop_ptr(rdx);  // restore receiver
  __ verify_oop(rdx);
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  __ load_klass(rdx, rdx, tmp_load_klass);
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rdx
  __ bind(quicked);
  __ load_klass(rdx, rax, tmp_load_klass);
  __ load_resolved_klass_at_index(rax, rcx, rbx);

  __ bind(resolved);

  // Generate subtype check. Blows rcx, rdi.
  // Superklass in rax. Subklass in rdx.
  __ gen_subtype_check(rdx, ok_is_subtype);

  // Come here on failure
  __ xorl(rax, rax);
  __ jmpb(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ movl(rax, 1);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null);  // same as 'done'
  }
  __ bind(done);
  // rax = 0: obj == NULL or  obj is not an instanceof the specified klass
  // rax = 1: obj != NULL and obj is     an instanceof the specified klass
}
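
// Editorial sketch (not generated code): contrast with checkcast above.
// checkcast leaves the reference on the stack (or throws
// ClassCastException), while instanceof consumes it and produces an int:
//
//   if (obj instanceof String) ...   // instanceof #n -> 0 or 1 in rax
//
// and, per the comments above, a null reference yields 0 without running
// the subtype check.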


//----------------------------------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping;
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);

  // get the unpatched byte code
  __ get_method(rarg);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             rarg, rbcp);
  __ mov(rbx, rax);  // keep the original bytecode in rbx across the call_VM below

  // post the breakpoint event
  __ get_method(rarg);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             rarg, rbcp);

  // complete the execution of original bytecode
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(rax);
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines; which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- rsp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rbp    ] <--- rbp
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  __ resolve(IS_NOT_NULL, rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Label allocated;

  Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
  Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);

  // initialize entry pointer
  __ xorl(rmon, rmon);  // points to free slot or NULL

  // find a free slot in the monitor block (result in rmon)
  {
    Label entry, loop, exit;
    __ movptr(rtop, monitor_block_top);  // points to current entry,
                                         // starting with top-most entry
    __ lea(rbot, monitor_block_bot);     // points to word before bottom
                                         // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is used
    __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
    // if not used then remember entry in rmon
    __ cmovptr(Assembler::equal, rmon, rtop);  // cmov => cmovptr
    // check if current entry is for same object
    __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jccb(Assembler::equal, exit);
    // otherwise advance to next entry
    __ addptr(rtop, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(rtop, rbot);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
    __ bind(exit);
  }

  __ testptr(rmon, rmon);                 // check if a slot has been found
  __ jcc(Assembler::notZero, allocated);  // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers           // rsp: old expression stack top
    __ movptr(rmon, monitor_block_bot);  // rmon: old expression stack bottom
    __ subptr(rsp, entry_size);          // move expression stack top
    __ subptr(rmon, entry_size);         // move expression stack bottom
    __ mov(rtop, rsp);                   // set start value for copy loop
    __ movptr(monitor_block_bot, rmon);  // set new monitor block bottom
    __ jmp(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ movptr(rbot, Address(rtop, entry_size));  // load expression stack
                                                 // word from old location
    __ movptr(Address(rtop, 0), rbot);           // and store it at new location
    __ addptr(rtop, wordSize);                   // advance to next word
    __ bind(entry);
    __ cmpptr(rtop, rmon);              // check if bottom reached
    __ jcc(Assembler::notEqual, loop);  // if not at bottom then
                                        // copy next word
  }

  // call run-time routine
  // rmon: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(rbcp);

  // store object
  __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ lock_object(rmon);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp();  // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);
}
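
// Editorial sketch (not generated code): a synchronized block such as
//
//   synchronized (lock) { ... }      // monitorenter ... monitorexit
//
// claims one BasicObjectLock in the frame's monitor block shown in the
// stack layout above. If no entry is free, the block is grown by sliding
// the expression stack down one entry_size; lock_object() then performs
// the actual lightweight (or inflated) locking on the slot.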

void TemplateTable::monitorexit() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  __ resolve(IS_NOT_NULL, rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ movptr(rtop, monitor_block_top);  // points to current entry,
                                         // starting with top-most entry
    __ lea(rbot, monitor_block_bot);     // points to word before bottom
                                         // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jcc(Assembler::equal, found);
    // otherwise advance to next entry
    __ addptr(rtop, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(rtop, rbot);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
  }

  // error handling. Unlocking was not block-structured
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                    InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);
  __ push_ptr(rax);  // make sure object is on stack (contract with oopMaps)
  __ unlock_object(rtop);
  __ pop_ptr(rax);   // discard object
}

// Wide instructions
void TemplateTable::wide() {
  transition(vtos, vtos);
  __ load_unsigned_byte(rbx, at_bcp(1));
  ExternalAddress wtable((address)Interpreter::_wentry_point);
  __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
  // Note: the rbcp increment step is part of the individual wide bytecode implementations
}
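
// Editorial sketch (not generated code): the wide prefix widens the operand
// of the following bytecode, e.g.
//
//   wide iload 300     // bytes: c4 15 01 2c   (16-bit local index)
//
// The jump above indexes the separate wide-entry table by the second byte,
// and each wide template advances rbcp by its own (longer) instruction
// length, per the note above.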

// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
  __ load_unsigned_byte(rax, at_bcp(3));  // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordSize
  // the latter wordSize to point to the beginning of the array.
  __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
  __ load_unsigned_byte(rbx, at_bcp(3));
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
}
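
// Editorial sketch (not generated code): for
//
//   int[][] a = new int[2][3];   // multianewarray #n, 2
//
// the dimension count (2) is the unsigned byte at bcp offset 3, the two
// element counts sit on the expression stack, and the lea above passes the
// address of the first count to InterpreterRuntime::multianewarray before
// the counts are popped off the stack.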