/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ _masm->
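// The '__' shorthand routes the pseudo-assembly below through the current
// InterpreterMacroAssembler: '__ foo()' expands to '_masm->foo()'.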

// Misc helpers

// Do an oop store like *(base + index + offset) = val
// index can be noreg.
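// Example (matches the call in aastore below):
//   do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT),
//                Otos_i, G3_scratch, IS_ARRAY);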
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         DecoratorSet decorators = 0) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  if (index == noreg) {
    __ store_heap_oop(val, base, offset, tmp, decorators);
  } else {
    __ store_heap_oop(val, base, index, tmp, decorators);
  }
}

// Do an oop load like val = *(base + index + offset)
// index can be noreg.
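// Example (matches the call in aaload below):
//   do_oop_load(_masm, O3, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT),
//               Otos_i, G3_scratch, IS_ARRAY);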
static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Register base,
                        Register index,
                        int offset,
                        Register dst,
                        Register tmp,
                        DecoratorSet decorators = 0) {
  assert(tmp != dst && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  if (index == noreg) {
    __ load_heap_oop(base, offset, dst, tmp, decorators);
  } else {
    __ load_heap_oop(base, index, dst, tmp, decorators);
  }
}


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
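// ccNot returns the complement of the Java-level condition: if_cmp branches
// past the taken-branch code when the complemented condition holds, i.e. when
// the Java test fails (see if_0cmp and friends below).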

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
  __ set(value, Otos_l);
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
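  // Materialize the constant's address: sethi sets the upper 22 bits and
  // low10() supplies the remaining low bits as the load displacement.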
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, notFloat, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

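  // The conditional branches below set the annul bit, so each delay-slot
  // instruction executes only when the branch is taken. This lets a branch
  // pre-load the tag comparison needed at its target label, which is why the
  // corresponding cmp instructions appear commented out at those labels.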
  __ cmp(O2, JVM_CONSTANT_Class);  // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba(exit);
  __ delayed()->nop();

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba(exit);
  __ delayed()->nop();

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->cmp(O2, JVM_CONSTANT_Float);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba(exit);
  __ delayed()->nop();

  __ bind(notString);
  // __ cmp(O2, JVM_CONSTANT_Float);
  __ brx(Assembler::notEqual, true, Assembler::pt, notFloat);
  __ delayed()->nop();
  __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);
  __ ba(exit);
  __ delayed()->nop();

  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notFloat);
  condy_helper(exit);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch, Lscratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
    Label notNull;
    __ set(ExternalAddress((address)Universe::the_null_sentinel_addr()), G3_scratch);
    __ ld_ptr(G3_scratch, 0, G3_scratch);
    __ cmp(G3_scratch, Otos_i);
    __ br(Assembler::notEqual, true, Assembler::pt, notNull);
    __ delayed()->nop();
    __ clr(Otos_i);  // NULL object reference
    __ bind(notNull);
  }

  // Safe to call with 0 result
  __ verify_oop(Otos_i);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label notDouble, notLong, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, notDouble);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
  __ push(dtos);
  __ ba_short(exit);

  __ bind(notDouble);
  __ cmp_and_brx_short(O2, JVM_CONSTANT_Long, Assembler::notEqual, Assembler::pt, notLong);
  __ ldx(G3_scratch, base_offset, Otos_l);
  __ push(ltos);
  __ ba_short(exit);

  __ bind(notLong);
  condy_helper(exit);

  __ bind(exit);
}

void TemplateTable::condy_helper(Label& exit) {
  Register Robj = Otos_i;
  Register Roffset = G4_scratch;
  Register Rflags = G1_scratch;

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  __ set((int)bytecode(), O1);
  __ call_VM(Robj, entry, O1);

  // vm_result_2 holds flags = (tos, off), using the format of CPCE::_flags
  __ get_vm_result_2(G3_scratch);

  // Get offset
  __ set((int)ConstantPoolCacheEntry::field_index_mask, Roffset);
  __ and3(G3_scratch, Roffset, Roffset);

  // compute type
  __ srl(G3_scratch, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmp(Rflags, itos);
      __ br(Assembler::notEqual, false, Assembler::pt, notInt);
      __ delayed()->cmp(Rflags, ftos);
      // itos
      __ ld(Robj, Roffset, Otos_i);
      __ push(itos);
      __ ba_short(exit);

      __ bind(notInt);
      __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
      __ delayed()->cmp(Rflags, stos);
      // ftos
      __ ldf(FloatRegisterImpl::S, Robj, Roffset, Ftos_f);
      __ push(ftos);
      __ ba_short(exit);

      __ bind(notFloat);
      __ br(Assembler::notEqual, false, Assembler::pt, notShort);
      __ delayed()->cmp(Rflags, btos);
      // stos
      __ ldsh(Robj, Roffset, Otos_i);
      __ push(itos);
      __ ba_short(exit);

      __ bind(notShort);
      __ br(Assembler::notEqual, false, Assembler::pt, notByte);
      __ delayed()->cmp(Rflags, ctos);
      // btos
      __ ldsb(Robj, Roffset, Otos_i);
      __ push(itos);
      __ ba_short(exit);

      __ bind(notByte);
      __ br(Assembler::notEqual, false, Assembler::pt, notChar);
      __ delayed()->cmp(Rflags, ztos);
      // ctos
      __ lduh(Robj, Roffset, Otos_i);
      __ push(itos);
      __ ba_short(exit);

      __ bind(notChar);
      __ br(Assembler::notEqual, false, Assembler::pt, notBool);
      __ delayed()->nop();
      // ztos
      __ ldsb(Robj, Roffset, Otos_i);
      __ push(itos);
      __ ba_short(exit);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmp(Rflags, ltos);
      __ br(Assembler::notEqual, false, Assembler::pt, notLong);
      __ delayed()->cmp(Rflags, dtos);
      // ltos
      // load must be atomic
      __ ld_long(Robj, Roffset, Otos_l);
      __ push(ltos);
      __ ba_short(exit);

      __ bind(notLong);
      __ br(Assembler::notEqual, false, Assembler::pt, notDouble);
      __ delayed()->nop();
      // dtos
      __ ldf(FloatRegisterImpl::D, Robj, Roffset, Ftos_d);
      __ push(dtos);
      __ ba_short(exit);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");

  __ bind(exit);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}

void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  // Rewrite iload,iload pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
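  // (the index scale above is 4 bytes, log 2, with compressed oops, otherwise
  // a full word)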
  do_oop_load(_masm,
              O3,
              noreg,
              arrayOopDesc::base_offset_in_bytes(T_OBJECT),
              Otos_i,
              G3_scratch,
              IS_ARRAY);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2);  // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2);  // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2);  // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short( Otos_i, Assembler::pn, is_null );

  __ load_klass(O3, O4);      // get array klass
  __ load_klass(Otos_i, O5);  // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i: value
  // O1: addr - offset
  // O2: index
  // O3: array
  // O4: array element klass
  // O5: value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check. Branch to store_ok if no
  // failure. Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, IS_ARRAY);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, IS_ARRAY);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O2: index
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
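  // The layout helpers of T_BOOLEAN and T_BYTE arrays differ in exactly one
  // bit (Klass::layout_helper_boolean_diffbit()); andcc tests that bit to
  // tell the two array types apart.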
  __ load_klass(O3, G4_scratch);
  __ ld(G4_scratch, in_bytes(Klass::layout_helper_offset()), G4_scratch);
  __ set(Klass::layout_helper_boolean_diffbit(), G3_scratch);
  __ andcc(G3_scratch, G4_scratch, G0);
  Label L_skip;
  __ br(Assembler::zero, false, Assembler::pn, L_skip);
  __ delayed()->nop();
  __ and3(Otos_i, 1, Otos_i);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2);  // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c (c in reg) (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (longs and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __ add(O1, Otos_i, Otos_i);   break;
   case  sub:  __ sub(O1, Otos_i, Otos_i);   break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __ or3(O1, Otos_i, Otos_i);   break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __ sll(O1, Otos_i, Otos_i);   break;
   case  shr:  __ sra(O1, Otos_i, Otos_i);   break;
   case ushr:  __ srl(O1, Otos_i, Otos_i);   break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
   case  add:  __ add(O2, Otos_l, Otos_l);   break;
   case  sub:  __ sub(O2, Otos_l, Otos_l);   break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __ or3(O2, Otos_l, Otos_l);   break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: for SPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1);  // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

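  // Special case: min_int / -1 overflows the signed divide. The JVM spec
  // requires the result to wrap to min_int (the dividend), so in that case
  // return O1 unchanged.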
  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i);  // note: irem uses O1 after this instruction!
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
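  // remainder = dividend - (dividend / divisor) * divisor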
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
  __ mulx(Otos_l, O2, Otos_l);
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
}


void TemplateTable::lshl() {
  transition(itos, ltos);  // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
  __ sllx(O2, Otos_i, Otos_l);
}


void TemplateTable::lshr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srax(O2, Otos_i, Otos_l);
}



void TemplateTable::lushr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
  __ srlx(O2, Otos_i, Otos_l);
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2);
     __ pop_d(F0);
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ sub(G0, Otos_l, Otos_l);
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (bytecode() == Bytecodes::_l2f) {
      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
    } else {
      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
    }
    break;

   case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);  // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
    __ pop_f(F1);
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
    __ call_VM_leaf(Lscratch,
                    bytecode() == Bytecodes::_d2i
                      ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
                      : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

  __ pop_l(O1);  // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
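    // (the 3/5 adjustment makes the stored bci point at the bytecode
    // following the jsr: 3 = length of jsr, 5 = length of jsr_w)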

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov( Lbcp, l_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );  // add to bc addr

    const Register G3_method_counters = G3_scratch;
    __ get_method_counters(Lmethod, G3_method_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(G3_method_counters,
                               in_bytes(MethodCounters::backedge_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
      Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ldub(O0, nmethod::state_offset(), O2);
      __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

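      // After the restore the register window has rotated: the saved I1 (OSR
      // nmethod) and I0 (OSR buffer) are now visible as O1 and O0.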
1691 // Jump to the osr code.
1692 __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
1693 __ jmp(O2, G0);
1694 __ delayed()->nop();
1695
1696 } else { // not TieredCompilation
1697 // Update Backedge branch separately from invocations
1698 const Register G4_invoke_ctr = G4;
1699 __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
1700 if (ProfileInterpreter) {
1701 __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
1702 if (UseOnStackReplacement) {
1703
1704 __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
1705 }
1706 } else {
1707 if (UseOnStackReplacement) {
1708 __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
1709 }
1710 }
1711 }
1712
1713 __ bind(Lforward);
1714 } else
1715 // Bump bytecode pointer by displacement (take the branch)
1716 __ add( O1_disp, Lbcp, Lbcp );// add to bc addr
1717
1718 // continue with bytecode @ target
1719 // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
1720 // %%%%% and changing dispatch_next to dispatch_only
1721 __ dispatch_next(vtos, 0, true);
1722 }
1723
1724
1725 // Note Condition in argument is TemplateTable::Condition
1726 // arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp(Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}



void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp(Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;
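
  // The tableswitch operands, 4-byte aligned after the opcode (JVMS layout):
  //   int32_t default_offset;
  //   int32_t lo;
  //   int32_t hi;
  //   int32_t jump_offsets[hi - lo + 1];
  // O1 below points at default_offset; a matching key's offset lives at
  // O1 + 3*BytesPerInt + (key - lo)*BytesPerInt.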

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2); // low bound
  __ ld(O1, 2 * BytesPerInt, O3); // high bound
  // Sign extend the 32 bits
  __ sra(Otos_i, 0, Otos_i);

  // check against lo & hi
  __ cmp(Otos_i, O2);
  __ br(Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp(Otos_i, O3);
  __ br(Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2); // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
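
  // fast_linearswitch scans the lookupswitch operands in place (JVMS layout,
  // 4-byte aligned after the opcode):
  //   int32_t default_offset;
  //   int32_t npairs;
  //   struct { int32_t match; int32_t offset; } pairs[npairs]; // 2 words each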
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);    // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2);      // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4); // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    __ sub(O3, O1, O3);
    __ sub(O3, 2 * BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  assert(Otos_i == O0, "alias checking");
  const Register Rkey     = Otos_i; // already set (tosca)
  const Register Rarray   = O1;
  const Register Ri       = O2;
  const Register Rj       = O3;
  const Register Rh       = O4;
  const Register Rscratch = O5;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;
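  // Each table entry is one LookupswitchPair, i.e. two 4-byte words
  // (match, offset), so entry_size is 8 bytes; a pair's offset word sits at
  // Rarray + (index << log_entry_size) + BytesPerInt, as used below.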

  Label found;
  // Find Array start
  __ add(Lbcp, 3 * BytesPerInt, Rarray);
  __ and3(Rarray, -BytesPerInt, Rarray);
  // initialize i & j (in delay slot)
  __ clr(Ri);

  // and start
  Label entry;
  __ ba(entry);
  __ delayed()->ld(Rarray, -BytesPerInt, Rj);
  // (Rj is already in the native byte-ordering.)

  // binary search loop
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ sra(Rh, 1, Rh);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sll(Rh, log_entry_size, Rscratch);
    __ ld(Rarray, Rscratch, Rscratch);
    // (Rscratch is already in the native byte-ordering.)
    __ cmp(Rkey, Rscratch);
    __ movcc(Assembler::less,         false, Assembler::icc, Rh, Rj); // j = h if (key <  array[h].fast_match())
    __ movcc(Assembler::greaterEqual, false, Assembler::icc, Rh, Ri); // i = h if (key >= array[h].fast_match())

    // while (i+1 < j)
    __ bind(entry);
    __ add(Ri, 1, Rscratch);
    __ cmp(Rscratch, Rj);
    __ br(Assembler::less, true, Assembler::pt, loop);
    __ delayed()->add(Ri, Rj, Rh); // start h = i + j >> 1;
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mov(Ri, Rh); // Save index in i for profiling
  }
  __ sll(Ri, log_entry_size, Ri);
  __ ld(Rarray, Ri, Rscratch);
  // (Rscratch is already in the native byte-ordering.)
  __ cmp(Rkey, Rscratch);
  __ br(Assembler::notEqual, true, Assembler::pn, default_case);
  __ delayed()->ld(Rarray, -2 * BytesPerInt, Rj); // load default offset -> j

  // entry found -> j = offset
  __ inc(Ri, BytesPerInt);
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ ld(Rarray, Ri, Rj);
  // (Rj is already in the native byte-ordering.)

  if (ProfileInterpreter) {
    __ ba_short(continue_execution);
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri);

  __ bind(continue_execution);
  __ add(Lbcp, Rj, Lbcp);
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(), "inconsistent calls_vm information");

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ mov(G0, G3_scratch);
    __ access_local_ptr(G3_scratch, Otos_i);
    __ load_klass(Otos_i, O2);
    __ set(JVM_ACC_HAS_FINALIZER, G3);
    __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
    __ andcc(G3, O2, G0);
    Label skip_register_finalizer;
    __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
    __ delayed()->nop();

    // Call out to do finalizer registration
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);

    __ bind(skip_register_finalizer);
  }

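  // What the thread-local poll below amounts to (a sketch, not the emitted
  // code; names follow SafepointMechanism):
  //   if (G2_thread->polling_page & poll_bit()) {
  //     push(tos); InterpreterRuntime::at_safepoint(); pop(tos); // call_VM clobbers tos
  //   }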
  if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
    Label no_safepoint;
    __ ldx(Address(G2_thread, Thread::polling_page_offset()), G3_scratch, 0);
    __ btst(SafepointMechanism::poll_bit(), G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, no_safepoint);
    __ delayed()->nop();
    __ push(state);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
    __ pop(state);
    __ bind(no_safepoint);
  }

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
  if (state == itos) {
    __ narrow(Otos_i);
  }
  __ remove_activation(state, /* throw_monitor_exception */ true);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals.  Undo that adjustment.
  __ ret();  // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);
}


// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
//     the read float up to before the read.  It's OK for non-volatile memory
//     refs that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory
//     refs that happen BEFORE the write float down to after the write.  It's
//     OK for non-volatile memory refs that happen after the volatile write to
//     float up before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
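//
// In pseudocode, the resulting barrier placement is (a sketch of the policy
// above, not literal emitted code):
//   volatile load:   ld [field], x;  membar(LoadLoad|LoadStore)
//   volatile store:  membar(LoadStore|StoreStore);  st x, [field];  membar(StoreLoad)
// On TSO sparc only the trailing StoreLoad survives (see volatile_barrier below).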
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper function to insert an is-volatile test and memory barrier.
  // All current sparc implementations run in TSO, needing only StoreLoad.
  if ((order_constraint & Assembler::StoreLoad) == 0) return;
  __ membar(order_constraint);
}

// ----------------------------------------------------------------------------
void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  // Depends on cpCacheOop layout!

  Label resolved;
  Bytecodes::Code code = bytecode();
  switch (code) {
  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default: break;
  }
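
  // What the code below does, roughly (a sketch, not literal emitted code):
  //   if (cpCache[index].bytecode(byte_no) != code) {  // not yet resolved?
  //     InterpreterRuntime::resolve_from_cache(code);  // resolve and fill the entry
  //     reload Rcache/index from the bcp;
  //   }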

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
  __ cmp(Lbyte_code, code);  // have we resolved this bytecode?
  __ br(Assembler::equal, false, Assembler::pt, resolved);
  __ delayed()->set(code, O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  __ bind(resolved);
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {
  // Uses both G3_scratch and G4_scratch
  Register cache = G3_scratch;
  Register index = G4_scratch;
  assert_different_registers(cache, method, itable_index);

  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()
      )
    );
  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (is_invokevfinal) {
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ ld_ptr(Address(cache, method_offset), method);
  } else {
    size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
    resolve_cache_and_index(byte_no, cache, index, index_size);
    __ ld_ptr(Address(cache, method_offset), method);
  }

  if (itable_index != noreg) {
    // pick up itable or appendix index from f2 also:
    __ ld_ptr(Address(cache, index_offset), itable_index);
  }
  __ ld_ptr(Address(cache, flags_offset), flags);
}

// The Rcache register must be set before the call
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register index,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static) {
  assert_different_registers(Rcache, Rflags, Roffset, Lscratch);

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  if (is_static) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ld_ptr(Robj, mirror_offset, Robj);
    __ resolve_oop_handle(Robj, Lscratch);
  }
}

// The registers Rcache and index are expected to be set before the call.
// Correct values of the Rcache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register Rcache,
                                            Register index,
                                            bool is_static,
                                            bool has_tos) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
    __ load_contents(get_field_access_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);

    __ add(Rcache, in_bytes(cp_base_offset), Rcache);

    if (is_static) {
      __ clr(Otos_i);
    } else {
      if (has_tos) {
        // save object pointer before call_VM() clobbers it
        __ push_ptr(Otos_i);  // put object on tos where GC wants it.
      } else {
        // Load top of stack (do not pop the value off the stack);
        __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
      }
      __ verify_oop(Otos_i);
    }
    // Otos_i: object pointer or NULL if static
    // Rcache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               Otos_i, Rcache);
    if (!is_static && has_tos) {
      __ pop_ptr(Otos_i);  // restore object pointer
      __ verify_oop(Otos_i);
    }
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  Register Rcache  = G3_scratch;
  Register index   = G4_scratch;
  Register Rclass  = Rcache;
  Register Roffset = G4_scratch;
  Register Rflags  = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_access(Rcache, index, is_static, false);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  if (!is_static) {
    pop_and_check_object(Rclass);
  } else {
    __ verify_oop(Rclass);
  }

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);

  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
  }

  Label checkVolatile;

  // compute field type
  Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj;
  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

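  // The branch chain below is effectively a switch on the tos state, with
  // each next compare issued in the delay slot of the previous branch:
  //   switch (tos_state(Rflags)) {
  //     case atos: ... case itos: ... case ltos: ... case btos: ...
  //     case ztos: ... case ctos: ... case stos: ... case ftos: ...
  //     default:   /* dtos falls out at the end */
  //   }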
  // Check atos before itos for getstatic, more likely (in Queens at least)
  __ cmp(Rflags, atos);
  __ br(Assembler::notEqual, false, Assembler::pt, notObj);
  __ delayed()->cmp(Rflags, itos);

  // atos
  do_oop_load(_masm, Rclass, Roffset, 0, Otos_i, noreg);
  __ verify_oop(Otos_i);
  __ push(atos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notObj);

  // cmp(Rflags, itos);
  __ br(Assembler::notEqual, false, Assembler::pt, notInt);
  __ delayed()->cmp(Rflags, ltos);

  // itos
  __ ld(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notInt);

  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, btos);

  // ltos
  // load must be atomic
  __ ld_long(Rclass, Roffset, Otos_l);
  __ push(ltos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notLong);

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ctos);

  // ztos
  __ ldsb(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notBool);

  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos
  __ lduh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notChar);

  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos
  __ ldsh(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notShort);


  // cmp(Rflags, ftos);
  __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
  __ delayed()->tst(Lscratch);

  // ftos
  __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
  __ push(ftos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
  }
  __ ba(checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notFloat);


  // dtos
  __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
  __ push(dtos);
  if (!is_static && rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
  }

  __ bind(checkVolatile);
  if (__ membar_has_effect(membar_bits)) {
    // __ tst(Lscratch); executed in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
  }

  __ bind(exit);
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::nofast_getfield(int byte_no) {
  getfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);
  Register Rcache  = G3_scratch;
  Register index   = G4_scratch;
  Register Roffset = G4_scratch;
  Register Rflags  = Rcache;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  __ get_cache_and_index_at_bcp(Rcache, index, 1);
  jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);

  __ null_check(Otos_i);
  __ verify_oop(Otos_i);

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag (the is_volatile bit lives in the flags word,
    // not in f2, which was already consumed above as the field offset)
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
  }

  switch (bytecode()) {
  case Bytecodes::_fast_bgetfield:
    __ ldsb(Otos_i, Roffset, Otos_i);
    break;
  case Bytecodes::_fast_cgetfield:
    __ lduh(Otos_i, Roffset, Otos_i);
    break;
  case Bytecodes::_fast_sgetfield:
    __ ldsh(Otos_i, Roffset, Otos_i);
    break;
  case Bytecodes::_fast_igetfield:
    __ ld(Otos_i, Roffset, Otos_i);
    break;
  case Bytecodes::_fast_lgetfield:
    __ ld_long(Otos_i, Roffset, Otos_l);
    break;
  case Bytecodes::_fast_fgetfield:
    __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
    break;
  case Bytecodes::_fast_dgetfield:
    __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
    break;
  case Bytecodes::_fast_agetfield:
    do_oop_load(_masm, Otos_i, Roffset, 0, Otos_i, noreg);
    break;
  default:
    ShouldNotReachHere();
  }

  if (__ membar_has_effect(membar_bits)) {
    __ btst(Lscratch, Rflags);
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(exit);
  }

  if (state == atos) {
    __ verify_oop(Otos_i);  // does not blow flags!
  }
}

void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label done;
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G4_scratch);
    __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
    __ pop_ptr(G4_scratch);   // copy the object pointer from tos
    __ verify_oop(G4_scratch);
    __ push_ptr(G4_scratch);  // put the object pointer back on tos
    __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
    // Save tos values before call_VM() clobbers them.  Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {  // save tos values before call_VM() clobbers them
    case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
    // get words in right order for use as jvalue object
    case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
    default: break;
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G3_scratch);  __ inc(G3_scratch, wordSize);
    // G4_scratch:  object pointer
    // G1_scratch:  cache entry pointer
    // G3_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
    switch (bytecode()) {  // restore tos values
    case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
    case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
    default: break;
    }
    __ bind(done);
  }
}

// The registers Rcache and index are expected to be set before the call.
// The function may destroy various registers, just not the Rcache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G1_scratch);
    __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);

    // The Rcache and index registers have already been set.
    // This would allow the call below to be eliminated, but the Rcache and
    // index registers must then be used consistently after this line.
    __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);

    __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
    if (is_static) {
      // Life is simple.  Null out the object pointer.
      __ clr(G4_scratch);
    } else {
      Register Rflags = G1_scratch;
      // Life is harder.  The stack holds the value on top, followed by the
      // object.  We don't know the size of the value, though; it could be
      // one or two words depending on its type.  As a result, we must find
      // the type to determine where the object is.
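      // In effect (a sketch of the stack picture, not emitted code):
      //   jvalue  = Lesp + wordSize;                           // topmost value
      //   objAddr = Lesp + expr_offset_in_bytes(value_words);  // value_words is 1 or 2
      //   obj     = *objAddr;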

      Label two_word, valsizeknown;
      __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
      __ mov(Lesp, G4_scratch);
      __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
      // Make sure we don't need to mask Rflags after the above shift
      ConstantPoolCacheEntry::verify_tos_state_shift();
      __ cmp(Rflags, ltos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->cmp(Rflags, dtos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->nop();
      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
      __ ba_short(valsizeknown);
      __ bind(two_word);

      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));

      __ bind(valsizeknown);
      // setup object pointer
      __ ld_ptr(G4_scratch, 0, G4_scratch);
      __ verify_oop(G4_scratch);
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G1_scratch);  __ inc(G1_scratch, wordSize);
    // G4_scratch:  object pointer or NULL if static
    // G3_scratch:  cache entry pointer
    // G1_scratch:  jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
               G4_scratch, G3_scratch, G1_scratch);
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);
  Register Rcache  = G3_scratch;
  Register index   = G4_scratch;
  Register Rclass  = Rcache;
  Register Roffset = G4_scratch;
  Register Rflags  = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
  jvmti_post_field_mod(Rcache, index, is_static);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
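
  // For a volatile putfield the intended shape is (a sketch; on TSO sparc the
  // leading membar folds away, see membar_has_effect and volatile_barrier):
  //   membar(LoadStore|StoreStore);  st value, [field];  membar(StoreLoad)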

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);

    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
  // Make sure we don't need to mask Rflags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  // compute field type
  Label notInt, notShort, notChar, notObj, notByte, notBool, notLong, notFloat;

  if (is_static) {
    // putstatic with object type most likely, check that first
    __ cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, itos);

    // atos
    {
      __ pop_ptr();
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
    // cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, btos);

    // itos
    {
      __ pop_i();
      __ st(Otos_i, Rclass, Roffset);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
  } else {
    // putfield with int type most likely, check that first
    __ cmp(Rflags, itos);
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed()->cmp(Rflags, atos);

    // itos
    {
      __ pop_i();
      pop_and_check_object(Rclass);
      __ st(Otos_i, Rclass, Roffset);
      if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notInt);
    // cmp(Rflags, atos);
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed()->cmp(Rflags, btos);

    // atos
    {
      __ pop_ptr();
      pop_and_check_object(Rclass);
      __ verify_oop(Otos_i);
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch);
      if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
      __ ba(checkVolatile);
      __ delayed()->tst(Lscratch);
    }

    __ bind(notObj);
  }

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed()->cmp(Rflags, ztos);

  // btos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notByte);

  // cmp(Rflags, ztos);
  __ br(Assembler::notEqual, false, Assembler::pt, notBool);
  __ delayed()->cmp(Rflags, ltos);

  // ztos
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ and3(Otos_i, 1, Otos_i);
    __ stb(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notBool);
  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed()->cmp(Rflags, ctos);

  // ltos
  {
    __ pop_l();
    if (!is_static) pop_and_check_object(Rclass);
    __ st_long(Otos_l, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notLong);
  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed()->cmp(Rflags, stos);

  // ctos (char)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notChar);
  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed()->cmp(Rflags, ftos);

  // stos (short)
  {
    __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
    __ sth(Otos_i, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notShort);
  // cmp(Rflags, ftos);
  __ br(Assembler::notZero, false, Assembler::pt, notFloat);
  __ delayed()->nop();

  // ftos
  {
    __ pop_f();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
    }
    __ ba(checkVolatile);
    __ delayed()->tst(Lscratch);
  }

  __ bind(notFloat);

  // dtos
  {
    __ pop_d();
    if (!is_static) pop_and_check_object(Rclass);
    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
    if (!is_static && rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
    }
  }

  __ bind(checkVolatile);
  __ tst(Lscratch);

  if (__ membar_has_effect(write_bits)) {
    // __ tst(Lscratch); in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);
  Register Rcache  = G3_scratch;
  Register Rclass  = Rcache;
  Register Roffset = G4_scratch;
  Register Rflags  = G1_scratch;
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod();

  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
    if (__ membar_has_effect(read_bits)) {
      __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  pop_and_check_object(Rclass);

  switch (bytecode()) {
  case Bytecodes::_fast_zputfield: __ and3(Otos_i, 1, Otos_i);  // fall through to bputfield
  case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
  case Bytecodes::_fast_cputfield: /* fall through */
  case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
  case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
  case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
  case Bytecodes::_fast_fputfield:
    __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
    break;
  case Bytecodes::_fast_dputfield:
    __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
    break;
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch);
    break;
  default:
    ShouldNotReachHere();
  }

  if (__ membar_has_effect(write_bits)) {
    __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::nofast_putfield(int byte_no) {
  putfield_or_static(byte_no, false, may_not_rewrite);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);
  Register Rcache    = G3_scratch;
  Register Roffset   = G4_scratch;
  Register Rflags    = G4_scratch;
  Register Rreceiver = Lscratch;

  __ ld_ptr(Llocals, 0, Rreceiver);

  // access constant pool cache (is resolved)
  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
  __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
  __ add(Lbcp, 1, Lbcp);  // needed to report exception at the correct bcp

  __ verify_oop(Rreceiver);
  __ null_check(Rreceiver);
  if (state == atos) {
    do_oop_load(_masm, Rreceiver, Roffset, 0, Otos_i, noreg);
  } else if (state == itos) {
    __ ld(Rreceiver, Roffset, Otos_i);
  } else if (state == ftos) {
    __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
  } else {
    ShouldNotReachHere();
  }

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {

    // Get is_volatile value in Rflags and check if membar is needed
    __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);

    // Test volatile
    Label notVolatile;
    __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
    __ btst(Rflags, Lscratch);
    __ br(Assembler::zero, false, Assembler::pt, notVolatile);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(notVolatile);
  }

  __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  __ sub(Lbcp, 1, Lbcp);
}

//----------------------------------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register ra,      // return address
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(recv == noreg || recv == O0, "");
  assert(flags == noreg || flags == O1, "");

  // setup registers & access constant pool cache
  if (recv == noreg) recv = O0;
  if (flags == noreg) flags = O1;
  const Register temp = O2;
  assert_different_registers(method, ra, index, recv, flags, temp);

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // maybe push appendix to arguments
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
    __ btst(flags, temp);
    __ br(Assembler::zero, false, Assembler::pt, L_no_push);
    __ delayed()->nop();
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(temp, index, /*tmp*/recv);
    __ verify_oop(temp);
    __ push_ptr(temp);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  if (load_receiver) {
    __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp);  // get parameter size
    __ load_receiver(temp, recv);  //  __ argument_address uses Gargs but we need Lesp
    __ verify_oop(recv);
  }

  // compute return type
  __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
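  // The block below computes, in effect (a sketch):
  //   ra = Interpreter::invoke_return_entry_table_for(code)[tos_state];
  // i.e. one return entry per result type, indexed by the tos state.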
  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    AddressLiteral table(table_addr);
    __ set(table, temp);
    __ sll(ra, LogBytesPerWord, ra);
    __ ld_ptr(Address(temp, ra), ra);
  }
}


void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
  Register Rtemp = G4_scratch;
  Register Rcall = Rindex;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  // get target Method* & entry point
  __ lookup_virtual_method(Rrecv, Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ profile_called_method(G5_method, Rtemp);
  __ call_from_interpreter(Rcall, Gargs, Rret);
}

void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp    = G4_scratch;
  Register Rret     = Lscratch;
  Register O0_recv  = O0;
  Label notFinal;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
  __ btst(Rret, G4_scratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->and3(Rret, 0xFF, G4_scratch);  // gets number of parameters

  if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
    patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
  }

  invokevfinal_helper(Rscratch, Rret);

  __ bind(notFinal);

  __ mov(G5_method, Rscratch);  // better scratch register
  __ load_receiver(G4_scratch, O0_recv);  // gets receiverOop
  // receiver is in O0_recv
  __ verify_oop(O0_recv);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address

  // get receiver klass
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O0_recv);
  __ verify_klass_ptr(O0_recv);

  __ profile_virtual_call(O0_recv, O4);

  generate_vtable_call(O0_recv, Rscratch, Rret);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
                             /*is_invokevfinal*/true, false);
  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
  invokevfinal_helper(G3_scratch, Lscratch);
}

void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  Register Rtemp = G4_scratch;

  // Load receiver from stack slot
  __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
  __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
  __ load_receiver(G4_scratch, O0);

  // receiver NULL check
  __ null_check(O0);

  __ profile_final_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);

  // get return address
  AddressLiteral table(Interpreter::invoke_return_entry_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);  // get return type
  // Make sure we don't need to mask Rret after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ sll(Rret, LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);  // get return address


  // do the call
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv);  // get receiver also for null check
  __ null_check(O0_recv);

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret);  // get f1 Method*

  // do the call
  __ profile_call(O4);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}

void TemplateTable::invokeinterface_object_method(Register RKlass,
                                                  Register Rcall,
                                                  Register Rret,
                                                  Register Rflags) {
  Register Rscratch = G4_scratch;
  Register Rindex   = Lscratch;

  assert_different_registers(Rscratch, Rindex, Rret);

  Label notFinal;

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
  __ btst(Rflags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->nop();

  __ profile_final_call(O4);

  // do the call - the index (f2) contains the Method*
  assert_different_registers(G5_method, Gargs, Rcall);
  __ mov(Rindex, G5_method);
  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ call_from_interpreter(Rcall, Gargs, Rret);
  __ bind(notFinal);

  __ profile_virtual_call(RKlass, O4);
  generate_vtable_call(RKlass, Rindex, Rret);
}


void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rinterface = G1_scratch;
  const Register Rmethod    = Lscratch;
  const Register Rret       = G3_scratch;
  const Register O0_recv    = O0;
  const Register O1_flags   = O1;
  const Register O2_Klass   = O2;
  const Register Rscratch   = G4_scratch;
  assert_different_registers(Rscratch, G5_method);

  prepare_invoke(byte_no, Rinterface, Rret, Rmethod, O0_recv, O1_flags);

  // First check for Object case, then private interface method,
  // then regular interface method.

  // get receiver klass - this is also a null check
  __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0_recv, O2_Klass);

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object.  See cpCache.cpp for details.
  Label notObjectMethod;
  __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
  __ btst(O1_flags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notObjectMethod);
  __ delayed()->nop();

  invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);

  __ bind(notObjectMethod);

  Label L_no_such_interface;

  // Check for private method invocation - indicated by vfinal
  Label notVFinal;
  {
    __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
    __ btst(O1_flags, Rscratch);
    __ br(Assembler::zero, false, Assembler::pt, notVFinal);
    __ delayed()->nop();

    Label subtype;
    Register Rtemp = O1_flags;
    __ check_klass_subtype(O2_Klass, Rinterface, Rscratch, Rtemp, subtype);
    // If we get here the typecheck failed
    __ ba(L_no_such_interface);
    __ delayed()->nop();
    __ bind(subtype);

    // do the call
    Register Rcall = Rinterface;
    __ mov(Rmethod, G5_method);
    assert_different_registers(Rcall, G5_method, Gargs, Rret);

    __ profile_arguments_type(G5_method, Rcall, Gargs, true);
    __ profile_final_call(Rscratch);
    __ call_from_interpreter(Rcall, Gargs, Rret);
  }
  __ bind(notVFinal);

  Register Rtemp = O1_flags;

  // Receiver subtype check against REFC.
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             O2_Klass, Rinterface, noreg,
                             // outputs: temp reg1, temp reg2, temp reg3
                             G5_method, Rscratch, Rtemp,
                             L_no_such_interface,
                             /*return_method=*/false);

  __ profile_virtual_call(O2_Klass, O4);

  //
  // find entry point to call
  //

  // Get declaring interface class from method
  __ ld_ptr(Rmethod, Method::const_offset(), Rinterface);
  __ ld_ptr(Rinterface, ConstMethod::constants_offset(), Rinterface);
  __ ld_ptr(Rinterface, ConstantPool::pool_holder_offset_in_bytes(), Rinterface);

  // Get itable index from method
  const Register Rindex = G5_method;
  __ ld(Rmethod, Method::itable_index_offset(), Rindex);
  __ sub(Rindex, Method::itable_index_max, Rindex);
  __ neg(Rindex);
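  // The raw field stores the itable index biased by itable_index_max, so the
  // sub/neg pair above recovers, in effect:
  //   itable_index = Method::itable_index_max - raw_index_field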

  // Preserve O2_Klass for throw_AbstractMethodErrorVerbose
  __ mov(O2_Klass, O4);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             O4, Rinterface, Rindex,
                             // outputs: method, scan temp reg, temp reg
                             G5_method, Rscratch, Rtemp,
                             L_no_such_interface);

  // Check for abstract method error.
  {
    Label ok;
    __ br_notnull_short(G5_method, Assembler::pt, ok);
    // Pass arguments for generating a verbose error message.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
            O2_Klass, Rmethod);
    __ should_not_reach_here();
    __ bind(ok);
  }

  Register Rcall = Rinterface;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
  __ profile_called_method(G5_method, Rscratch);
  __ call_from_interpreter(Rcall, Gargs, Rret);

  __ bind(L_no_such_interface);
  // Pass arguments for generating a verbose error message.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
          O2_Klass, Rinterface);
  __ should_not_reach_here();
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret     = Lscratch;
  const Register G4_mtype = G4_scratch;
  const Register O0_recv  = O0;
  const Register Rscratch = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
  __ null_check(O0_recv);

  // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
  // G5: MH.invokeExact_MT method (from f2)

  // Note: G4_mtype is already pushed (if necessary) by prepare_invoke
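  //
  // A call site like "mh.invokeExact(x)" (a signature-polymorphic call on
  // java.lang.invoke.MethodHandle) is rewritten to this bytecode; the
  // adapter method in G5 bridges into the handle's LambdaForm.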

  // do the call
  __ verify_oop(G4_mtype);
  __ profile_final_call(O4);  // FIXME: profile the LambdaForm also
  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register Rret        = Lscratch;
  const Register G4_callsite = G4_scratch;
  const Register Rscratch    = G3_scratch;

  prepare_invoke(byte_no, G5_method, Rret, G4_callsite);

  // G4: CallSite object (from cpool->resolved_references[f1])
  // G5: MH.linkToCallSite method (from f2)

  // Note: G4_callsite is already pushed by prepare_invoke
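  //
  // Example: Java lambda capture sites (bootstrapped via LambdaMetafactory)
  // compile to invokedynamic; once the CallSite is resolved, its target
  // MethodHandle is entered through the linkToCallSite adapter in G5.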

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(O4);

  // do the call
  __ verify_oop(G4_callsite);
  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}


//----------------------------------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);

  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object;  // including clearing the fields

  Register RallocatedObject = Otos_i;
  Register RinstanceKlass = O1;
  Register Roffset = O3;
  Register Rscratch = O4;

  __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(Rscratch, G3_scratch);
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the order
  // in which the constant pool is updated (see ConstantPool::klass_at_put).
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
  // get InstanceKlass
  __ load_resolved_klass_at_offset(Rscratch, Roffset, RinstanceKlass);

  // make sure klass is fully initialized:
  __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
  __ cmp(G3_scratch, InstanceKlass::fully_initialized);
  __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // get instance_size in InstanceKlass (already aligned)
  //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);

  // make sure klass has no finalizer and is not abstract, an interface, or java/lang/Class
  __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
  __ br(Assembler::notZero, false, Assembler::pn, slow_case);
  __ delayed()->nop();

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If it fails, go to the slow path.
  //  Else if inline contiguous allocations are enabled:
  //    Try to allocate in eden.
  //    If it fails due to reaching the heap end, go to the slow path.
  //
  //  If TLAB is enabled OR inline contiguous is enabled:
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to the slow path.
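  //
  // The TLAB fast path below amounts to this C-like sketch (no CAS is
  // needed because the TLAB is thread-local):
  //   new_top = thread->tlab_top + instance_size;
  //   if (new_top <= thread->tlab_end) {
  //     obj = thread->tlab_top;
  //     thread->tlab_top = new_top;
  //   } else {
  //     goto slow_case;
  //   }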

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc();

  if (UseTLAB) {
    Register RoldTopValue = RallocatedObject;
    Register RtlabWasteLimitValue = G3_scratch;
    Register RnewTopValue = G1_scratch;
    Register RendValue = Rscratch;
    Register RfreeValue = RnewTopValue;

    // check if we can allocate in the TLAB
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
    __ add(RoldTopValue, Roffset, RnewTopValue);

    // if there is enough space, we do not CAS and do not clear
    __ cmp(RnewTopValue, RendValue);
    if (ZeroTLAB) {
      // the fields have already been cleared
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
    } else {
      // initialize both the header and fields
      __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
    }
    __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));

    // Allocation does not fit in the TLAB.
    __ ba_short(slow_case);
  } else {
    // Allocation in the shared Eden
    if (allow_shared_alloc) {
      Register RoldTopValue = G1_scratch;
      Register RtopAddr = G3_scratch;
      Register RnewTopValue = RallocatedObject;
      Register RendValue = Rscratch;

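      // Shared-eden allocation is a CAS loop over the global top pointer,
      // roughly:
      //   do {
      //     old_top = *heap_top;  new_top = old_top + instance_size;
      //     if (new_top > *heap_end) goto slow_case;
      //   } while (CAS(heap_top, old_top, new_top) != old_top);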
      __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);

      Label retry;
      __ bind(retry);
      __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
      __ ld_ptr(RendValue, 0, RendValue);
      __ ld_ptr(RtopAddr, 0, RoldTopValue);
      __ add(RoldTopValue, Roffset, RnewTopValue);

      // RnewTopValue contains the top address after the new object
      // has been allocated.
      __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

      __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

      // if someone beat us on the allocation, try again, otherwise continue
      __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);

      // bump total bytes allocated by this thread
      // RoldTopValue and RtopAddr are dead, so can use G1 and G3
      __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
    }
  }

  // If UseTLAB or allow_shared_alloc is true, the object was created above
  // and needs to be initialized. Otherwise, skip and go to the slow path.
  if (UseTLAB || allow_shared_alloc) {
    // clear object fields
    __ bind(initialize_object);
    __ deccc(Roffset, sizeof(oopDesc));
    __ br(Assembler::zero, false, Assembler::pt, initialize_header);
    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);

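    // At this point G3_scratch = obj + sizeof(oopDesc) and Roffset holds the
    // remaining byte count, so the zeroing below is effectively
    //   memset((char*)obj + sizeof(oopDesc), 0, instance_size - sizeof(oopDesc));
    // done a word at a time (or via BIS block stores when UseBlockZeroing).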
    // initialize remaining object fields
    if (UseBlockZeroing) {
      // Use BIS for zeroing
      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
    } else {
      Label loop;
      __ subcc(Roffset, wordSize, Roffset);
      __ bind(loop);
      //__ subcc(Roffset, wordSize, Roffset);  // executed above loop or in delay slot
      __ st_ptr(G0, G3_scratch, Roffset);
      __ br(Assembler::notEqual, false, Assembler::pt, loop);
      __ delayed()->subcc(Roffset, wordSize, Roffset);
    }
    __ ba_short(initialize_header);
  }

  // slow case
  __ bind(slow_case);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(O1);

  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);

  __ ba_short(done);

  // Initialize the header: mark, klass
  __ bind(initialize_header);

  if (UseBiasedLocking) {
    __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
  } else {
    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
  }
  __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
  __ store_klass_gap(G0, RallocatedObject);          // klass gap if compressed
  __ store_klass(RinstanceKlass, RallocatedObject);  // klass (last for CMS)
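  // The header is now complete: the mark word holds the klass's prototype
  // header (a biasable pattern when UseBiasedLocking), and with compressed
  // class pointers the 32-bit gap beside the narrow klass field was zeroed
  // by store_klass_gap above.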

  {
    SkipIfEqual skip_if(
      _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
    // Trigger dtrace event
    __ push(atos);
    __ call_VM_leaf(noreg,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
    __ pop(atos);
  }

  // continue
  __ bind(done);
}



void TemplateTable::newarray() {
  transition(itos, atos);
  __ ldub(Lbcp, 1, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
}


void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_constant_pool(O1);
  __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
}


void TemplateTable::arraylength() {
  transition(atos, itos);
  Label ok;
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_1_x( Assembler::notZero, ok );
  __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
  __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
}


void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, cast_ok, resolved;
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for casting a NULL
  __ br_null(Otos_i, false, Assembler::pn, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the checkcast has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
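  // Quickening: after the first successful resolution the tag array entry
  // reads JVM_CONSTANT_Class and the Klass* can be read directly from the
  // constant pool's resolved-klasses array (the quicked path below);
  // otherwise we must call into the runtime to resolve it first.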

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);


  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to cast_ok if no
  // failure. Throw an exception on failure.
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}


void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for testing against a NULL
  __ br_null(Otos_i, false, Assembler::pt, is_null);
  __ delayed()->nop();

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the instanceof has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ get_constant_pool(Lscratch);
  __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);

  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check. Branch to done if no
  // failure. Return 0 on failure.
  __ or3(G0, 1, Otos_i);      // set result assuming quick tests succeed
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
  // Not a subtype; return 0;
  __ clr( Otos_i );
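
  // Net effect matches the Java semantics of "o instanceof T": Otos_i is 1
  // for a non-null o whose class is a subtype of T, and 0 otherwise
  // (null is handled separately at is_null below).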

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}

void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same
  // as O0, which is the same as what throw_exception_entry expects.
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for monitor block layout.
// Monitor elements are dynamically allocated by growing the stack as needed.

void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until
  // monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x( Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block


  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

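  // The scan below walks the monitor block, remembering an unused slot and
  // stopping early on a re-entrant match; in C-like pseudocode:
  //   for (e = Lmonitors; e <= top_most_monitor(); e++) {
  //     if (e->obj == NULL) free = e;    // candidate slot
  //     if (e->obj == obj)  break;       // already have an entry for obj
  //   }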
  {
    Label entry, loop, exit;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba( entry );
    __ delayed()->mov( Lmonitors, O3 ); // first one to check


    __ bind( loop );

    __ verify_oop(O4);          // verify each monitor's oop
    __ tst(O4);                 // is this entry unused?
    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);

    __ cmp(O4, O0);             // check if current entry is for same object
    __ brx( Assembler::equal, false, Assembler::pn, exit );
    __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one

    __ bind( entry );

    __ cmp( O3, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind( exit );
  }

  { Label allocated;

    // found free slot?
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack( false, O2, O3 );
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor element to check; start with the most
    // recent monitor. Using a local register lets it survive the call into
    // the C routine.
    __ delayed()->mov( Lmonitors, Lscratch );

    __ bind( loop );

    __ verify_oop(O4);          // verify each monitor's oop
    __ cmp(O4, O0);             // check if current entry is for desired object
    __ brx( Assembler::equal, true, Assembler::pt, found );
    __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit

    __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next

    __ bind( entry );

    __ cmp( Lscratch, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch); // get next bc
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
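  // i.e. an indirect jump through the wide-opcode dispatch table:
  //   goto Interpreter::_wentry_point[next_bytecode];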
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub( Lbcp, 3, Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past last_dim, so set O1 to the first_dim address
  __ add( Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
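  // Example: "new int[a][b]" compiles to multianewarray with ndims == 2; both
  // dimension words were pushed on the expression stack and are popped here.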
}