/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


//------------------------------------------------------------


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}

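// The peephole pass may move an op into a branch delay slot only if it
// expands to exactly one instruction; this predicate makes that decision
// for the ops the pass considers.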
bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;


    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();


    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // This works around a problem where a move with identical src and
        // dst ends up in the delay slot: the assembler swallows the mov
        // since it has no effect, and then complains because the delay
        // slot is empty. Returning false stops the optimizer from putting
        // this move in the delay slot.
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

      if (UseCompressedOops) {
        if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
        if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
      }

      if (UseCompressedClassPointers) {
        if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
            src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation. The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // The OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the locals array is the last local from the interpreter and the
  // last slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the OSR buffer is the
  // nth lock from the interpreter frame, and the nth lock slot in the
  // OSR buffer is the 0th lock of the interpreter frame (the method
  // lock, if this is a synchronized method).

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
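    // The buffer holds max_locals words of locals followed by two-word
    // monitor entries; monitor_offset addresses the (lock, oop) pair of
    // monitor 0, the last entry in the buffer, and the loop below steps
    // through the entries at decreasing offsets.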
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after the exception handler, therefore as a call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow unlocking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, but if we use
    // slow unlocking, speed doesn't matter anyway and this solution is
    // simpler and requires less duplicated code - additionally, the
    // slow unlocking code is the same in either case which simplifies
    // debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception-related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    __ save_thread(I1); // need to preserve thread in G2 across
                        // runtime call
    metadata2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ restore_thread(I1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0);  // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
#ifdef ASSERT
    {
      ThreadInVMfromNative tiv(JavaThread::current());
      assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
    }
#endif
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large. We must
  // therefore generate the sethi/add as placeholders.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  __ set_metadata_constant(o, reg);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large. We must
  // therefore generate the sethi/add as placeholders.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:  // Both idiv & irem are handled after the switch (below).
      break;
    case lir_fmaf:
      __ fmadd(FloatRegisterImpl::S,
               op->in_opr1()->as_float_reg(),
               op->in_opr2()->as_float_reg(),
               op->in_opr3()->as_float_reg(),
               op->result_opr()->as_float_reg());
      return;
    case lir_fmad:
      __ fmadd(FloatRegisterImpl::D,
               op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(),
               op->in_opr3()->as_double_reg(),
               op->result_opr()->as_double_reg());
      return;
    default:
      ShouldNotReachHere();
      break;
  }

  // Handle idiv & irem:

  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
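    // A plain arithmetic shift rounds toward negative infinity, while Java
    // division rounds toward zero; adding (divisor - 1) to negative
    // dividends first corrects this. For example, -7 / 4: the sra produces
    // a mask of -1, and3 gives 3, and (-7 + 3) >> 2 == -1 as required.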
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_int(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

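  // A 32-bit signed divide overflows only for min_jint / -1. In that case
  // sdivcc sets the overflow flag and the annulled delay-slot sethi below
  // forces the quotient to 0x80000000 (min_jint), which yields the
  // Java-required results for both idiv and irem.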
  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:     acond = Assembler::f_notEqual; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::f_unorderedOrLess           : Assembler::f_less);           break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::f_unorderedOrGreater        : Assembler::f_greater);        break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual    : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual : Assembler::f_greaterOrEqual); break;
      default:                    ShouldNotReachHere();
    }
    __ fb( acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // SPARC has different condition codes for testing 32-bit
    // vs. 64-bit values. We could always test xcc if we could
    // guarantee that 32-bit loads are always sign-extended, but that
    // isn't true, and since sign extension isn't free it would impose
    // a slight cost.
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else {
      __ brx(acond, false, Assembler::pn, *(op->label()));
    }
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
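      // sra with a shift count of 0 copies rval while sign-extending the
      // 32-bit value into the full 64-bit destination register.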
      __ sra(rval, 0, rlo);
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
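      // fcmp of a register against itself is unordered exactly when the
      // value is NaN, so the annulled delay-slot store of 0 below executes
      // only in the NaN case.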
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind (L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
      __ sra(rlo, 0, rdst);
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
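      // A left shift followed by an arithmetic right shift sign-extends
      // the low 8 bits (i2b) or 16 bits (i2s) into a full int; the i2c
      // case below uses sll/srl to zero-extend instead.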
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll(rval, shift, rdst);
      __ sra(rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll(rval, shift, rdst);
      __ srl(rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word-aligned on SPARC
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr(), false);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
  __ load_klass(O0, G3_scratch);
  if (Assembler::is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}

int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type, wide);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(from_reg->as_register());
    }
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
        if (unaligned || PatchALot) {
          // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
          assert(G3_scratch != base, "can't handle this");
          assert(G3_scratch != from_reg->as_register_lo(), "can't handle this");
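          // Split the 64-bit store into two 32-bit word stores: SPARC
          // requires natural alignment for stx, and a patchable store
          // must stay word-sized.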
          __ srax(from_reg->as_register_lo(), 32, G3_scratch);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(G3_scratch,                 base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
        break;
      case T_ADDRESS:
      case T_METADATA:
        __ st_ptr(from_reg->as_register(), base, offset);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
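            // Narrow oop: encode into G3_scratch and store only 32 bits;
            // 'wide' forces a full-width store even with compressed oops.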
            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
            store_offset = code_offset();
            __ stw(G3_scratch, base, offset);
          } else {
            __ st_ptr(from_reg->as_register(), base, offset);
          }
          break;
        }

      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default : ShouldNotReachHere();
    }
  }
  return store_offset;
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
      __ stx(from_reg->as_register_lo(), base, disp);
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else {
          __ st_ptr(from_reg->as_register(), base, disp);
        }
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default : ShouldNotReachHere();
  }
  return store_offset;
}

int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned && !PatchALot) {
          __ ldx(base, offset, to_reg->as_register_lo());
        } else {
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
        }
        break;
      case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lduw(base, offset, to_reg->as_register());
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld_ptr(base, offset, to_reg->as_register());
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lduw(base, offset, to_reg->as_register());
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld_ptr(base, offset, to_reg->as_register());
          }
          break;
        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default : ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(to_reg->as_register());
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld_ptr(base, disp, to_reg->as_register());
        }
        break;
      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
      __ ldx(base, disp, to_reg->as_register_lo());
      break;
    default : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  int offset = -1;

  switch (c->type()) {
    case T_FLOAT: type = T_INT; // Float constants are stored by int store instructions.
    case T_INT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      LIR_Opr tmp = FrameMap::O7_opr;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_lo, O7);
      }
      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_hi, O7);
      }
      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }

      break;
    }
    default:
      Unimplemented();
  }
  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
          __ set(con, to_reg->as_register_lo());
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, ((frame::register_save_words) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set(const_addrlit, O7);
          __ ld(O7, 0, to_reg->as_register());
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_pointer_register();
  LIR_Opr index = addr->index();
  if (index->is_illegal()) {
    return Address(reg, addr->disp());
  } else {
    assert (addr->disp() == 0, "unsupported address mode");
    return Address(reg, index->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}

void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_pointer_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word())  {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

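  // A double-word slot whose biased displacement is not 8-byte aligned
  // cannot be accessed with a single 8-byte memory operation on SPARC, so
  // flag it and let load()/store() split the access into two word accesses.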
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word())  {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register_lo());
    } else {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}

void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
      if (needs_patching) {
        __ patchable_set(0, O7);
      } else {
        __ set(disp_value, O7);
      }
      disp_reg = O7;
    }
  } else if (unaligned || PatchALot) {
    __ add(src, addr->index()->as_pointer_register(), O7);
    src = O7;
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  if (disp_reg == noreg) {
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "can't handle this");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result) {
  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), L0);
  } else {
    __ set((intptr_t)os::get_polling_page(), L0);
  }
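  // Safepoint poll: load from the polling page. When a safepoint is
  // pending the page is protected, so the load traps and the signal
  // handler parks the thread; the relocation marks the poll for the
  // runtime.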
  __ relocate(relocInfo::poll_return_type);
  __ ld_ptr(L0, 0, G0);
  __ ret();
  __ delayed()->restore();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), tmp->as_register());
  } else {
    __ set((intptr_t)os::get_polling_page(), tmp->as_register());
  }
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();

  __ relocate(relocInfo::poll_type);
  __ ld_ptr(tmp->as_register(), 0, G0);
  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

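  // When the call site is resolved this stub is patched: the set_metadata
  // below receives the callee Method* and the jump target becomes the
  // callee's entry point.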
  __ set_metadata(NULL, G5);
  // must be set to -1 at code generation time
  AddressLiteral addrlit(-1);
  __ jump_to(addrlit, G3);
  __ delayed()->nop();

  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_fpu()) {
    __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          { jint con = opr2->as_constant_ptr()->as_jint();
            if (Assembler::is_simm13(con)) {
              __ cmp(opr1->as_register(), con);
            } else {
              __ set(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_OBJECT:
          // there are only equal/notEqual comparisons on objects
          { jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              jobject2reg(con, O7);
              __ cmp(opr1->as_register(), O7);
            }
          }
          break;

        case T_METADATA:
          // We only need, for now, comparison with NULL for metadata.
          { assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            Metadata* m = opr2->as_constant_ptr()->as_metadata();
            if (m == NULL) {
              __ cmp(opr1->as_register(), 0);
            } else {
              ShouldNotReachHere();
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        LIR_Address* addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) {
          __ ld_ptr(as_Address(addr), O7);
        } else {
          __ ld(as_Address(addr), O7);
        }
        __ cmp(opr1->as_register(), O7);
      } else {
        __ cmp(opr1->as_register(), opr2->as_register());
      }
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
      __ orcc(xhi, G0, G0);
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      __ cmp(xlo, ylo);
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    LIR_Address* addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) {
      __ ld_ptr(as_Address(addr), O7);
    } else {
      __ ld(as_Address(addr), O7);
    }
    __ cmp(O7, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;                break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
    case lir_cond_less:         acond = Assembler::less;                 break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
    case lir_cond_greater:      acond = Assembler::greater;              break;
    case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
    case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
    default:                    ShouldNotReachHere();
  };

  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    // load up first part of constant before branch
    // and do the rest in the delay slot.
1599 if (!Assembler::is_simm13(opr1->as_jint())) {
1600 __ sethi(opr1->as_jint(), dest);
1601 }
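    // A 32-bit constant outside the simm13 range takes two instructions:
    // sethi writes the upper 22 bits here, and the or3 in the delay slot
    // below fills in the low 10 bits. For example, materializing 0x12345678:
    //   sethi %hi(0x12345678), dest   ! dest = 0x12345400
    //   or    dest, 0x278, dest       ! dest = 0x12345678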
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else if (opr1->is_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  Label skip;
  if (type == T_INT) {
    __ br(acond, false, Assembler::pt, skip);
  } else {
    __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
  }
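  // The branch above is not annulled, so the delay slot instruction chosen
  // below always executes: when the condition holds we branch to skip with
  // opr1 already in the result, otherwise we fall through and the code after
  // the delay slot overwrites the result with opr2.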
  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    if (Assembler::is_simm13(opr1->as_jint())) {
      __ delayed()->or3(G0, opr1->as_jint(), dest);
    } else {
      // the sethi has been done above, so just put in the low 10 bits
      __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
    }
  } else {
    // can't do anything useful in the delay slot
    __ delayed()->nop();
  }
  if (opr2->is_constant()) {
    const2reg(opr2, result, lir_patch_none, NULL);
  } else if (opr2->is_register()) {
    reg2reg(opr2, result);
  } else if (opr2->is_stack()) {
    stack2reg(opr2, result, result->type());
  } else {
    ShouldNotReachHere();
  }
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      FloatRegisterImpl::Width w;
      if (right->is_single_fpu()) {
        w = FloatRegisterImpl::S;
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
      } else {
        w = FloatRegisterImpl::D;
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
      }

      switch (code) {
        case lir_add: __ fadd(w, lreg, rreg, res); break;
        case lir_sub: __ fsub(w, lreg, rreg, res); break;
        case lir_mul: // fall through
        case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
        case lir_div: // fall through
        case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }

    } else if (dest->is_double_cpu()) {
      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add:
          __ add(op1_lo, op2_lo, dst_lo);
          break;

        case lir_sub:
          __ sub(op1_lo, op2_lo, dst_lo);
          break;

        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add (lreg, rreg, res); break;
        case lir_sub: __ sub (lreg, rreg, res); break;
        case lir_mul: __ mulx(lreg, rreg, res); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int simm13 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_add: __ add (lreg, simm13, res); break;
        case lir_sub: __ sub (lreg, simm13, res); break;
        case lir_mul: __ mulx(lreg, simm13, res); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm13(con), "must be simm13");

      switch (code) {
        case lir_add: __ add (lreg, (int)con, res); break;
        case lir_sub: __ sub (lreg, (int)con, res); break;
        case lir_mul: __ mulx(lreg, (int)con, res); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


void LIR_Assembler::fpop() {
  // do nothing
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_tan: {
      assert(thread->is_valid(), "preserve the thread object for performance reasons");
      assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
      break;
    }
    case lir_sqrt: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    case lir_abs: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  if (right->is_constant()) {
    if (dest->is_single_cpu()) {
      int simm13 = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ and3(left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_or:  __ or3 (left->as_register(), simm13, dest->as_register()); break;
        case lir_logic_xor: __ xor3(left->as_register(), simm13, dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      long c = right->as_constant_ptr()->as_jlong();
      assert(c == (int)c && Assembler::is_simm13(c), "out of range");
      int simm13 = (int)c;
      switch (code) {
        case lir_logic_and:
          __ and3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_or:
          __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_xor:
          __ xor3(left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_register(), "right should be in register");

    if (dest->is_single_cpu()) {
      switch (code) {
        case lir_logic_and: __ and3(left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_or:  __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
        case lir_logic_xor: __ xor3(left->as_register(), right->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
                                                                        left->as_register_lo();
      Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
                                                                          right->as_register_lo();

      switch (code) {
        case lir_logic_and: __ and3(l, r, dest->as_register_lo()); break;
        case lir_logic_or:  __ or3 (l, r, dest->as_register_lo()); break;
        case lir_logic_xor: __ xor3(l, r, dest->as_register_lo()); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


int LIR_Assembler::shift_amount(BasicType t) {
  int elem_size = type2aelembytes(t);
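  // The returned value is log2(element size): e.g. a T_INT element is 4 bytes,
  // so an array index is scaled with "sll index, 2" to form a byte offset.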
  switch (elem_size) {
    case 1 : return 0;
    case 2 : return 1;
    case 4 : return 2;
    case 8 : return 3;
  }
  ShouldNotReachHere();
  return -1;
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Oexception, "should match");
  assert(exceptionPC->as_register() == Oissuing_pc, "should match");

  info->add_register_oop(exceptionOop);

  // reuse the debug info from the safepoint poll for the throw op itself
  address pc_for_athrow = __ pc();
  int pc_for_athrow_offset = __ offset();
  RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  __ set(pc_for_athrow, Oissuing_pc, rspec);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Oexception, "should match");

  __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
  __ delayed()->nop();
}

void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src     = op->src()->as_register();
  Register dst     = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp     = op->tmp()->as_register();
  Register tmp2    = O7;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // the upper 32 bits must be zero: sign-extend the incoming 32-bit values
  __ sra(dst_pos, 0, dst_pos);
  __ sra(src_pos, 0, src_pos);
  __ sra(length, 0, length);
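  // (On SPARC v9, "sra reg, 0, reg" shifts by zero and sign-extends the low
  // 32 bits into the upper half, clearing any stale upper-half bits.)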

  // set up the arraycopy stub information
  ArrayCopyStub* stub = op->stub();

  // Always call the stub if no type information is available: it is fine if
  // the known type isn't loaded, since the code sanity-checks in debug mode
  // and the type isn't required when we know the exact type. Also check that
  // the type is an array type.
  if (op->expected_type() == NULL) {
    __ mov(src,     O0);
    __ mov(src_pos, O1);
    __ mov(dst,     O2);
    __ mov(dst_pos, O3);
    __ mov(length,  O4);
    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != NULL, "generic arraycopy stub required");

#ifndef PRODUCT
    if (PrintC1Statistics) {
      address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
      __ inc_counter(counter, G1, G3);
    }
#endif
    __ call_VM_leaf(tmp, copyfunc_addr);
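
    // The generic stub returns 0 on success, or ~(number of elements already
    // copied) on failure; recovering that count below lets the slow-path stub
    // resume the copy at the first uncopied element.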
    __ xor3(O0, -1, tmp);
    __ sub(length, tmp, length);
    __ add(src_pos, tmp, src_pos);
    __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
    __ delayed()->add(dst_pos, tmp, dst_pos);
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");

  // make sure src and dst are non-null and load array length
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ tst(src);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ tst(dst);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(dst, tmp);
      __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
      __ cmp(tmp2, Klass::_lh_neutral_value);
      __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(src, tmp);
      __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
      __ cmp(tmp2, Klass::_lh_neutral_value);
      __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }
  }

  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    // test src_pos register
    __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    // test dst_pos register
    __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    // make sure length isn't negative
    __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, src_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, dst_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  int shift = shift_amount(basic_type);

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        // We don't need decode because we just need to compare
        __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      } else {
        __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      }
      __ delayed()->nop();
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      Label cont, slow;
      assert_different_registers(tmp, tmp2, G3, G1);

      __ load_klass(src, G3);
      __ load_klass(dst, G1);

      __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);

      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();

      __ cmp(G3, 0);
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.
        __ br(Assembler::notEqual, false, Assembler::pt, cont);
        __ delayed()->nop();

        __ bind(slow);

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Only one of the two is known statically to be an object array;
          // verify the other one at runtime.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(src, tmp);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(dst, tmp);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());

          __ lduw(tmp, lh_offset, tmp2);

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ set(objArray_lh, tmp);
          __ cmp(tmp, tmp2);
          __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
          __ delayed()->nop();
        }

        Register src_ptr = O0;
        Register dst_ptr = O1;
        Register len     = O2;
        Register chk_off = O3;
        Register super_k = O4;

        __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
        } else {
          __ sll(src_pos, shift, tmp);
          __ add(src_ptr, tmp, src_ptr);
        }

        __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
        if (shift == 0) {
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sll(dst_pos, shift, tmp);
          __ add(dst_ptr, tmp, dst_ptr);
        }
        __ mov(length, len);
        __ load_klass(dst, tmp);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        __ ld_ptr(tmp, ek_offset, super_k);

        int sco_offset = in_bytes(Klass::super_check_offset_offset());
        __ lduw(super_k, sco_offset, chk_off);

        __ call_VM_leaf(tmp, copyfunc_addr);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ br_notnull_short(O0, Assembler::pn, failed);
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
          __ bind(failed);
        }
#endif

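        // Like the generic stub, checkcast_arraycopy returns 0 on success or
        // ~(number of elements copied) when a per-element store check failed;
        // the delay slot recovers the count so the positions and length can
        // be adjusted before falling into the slow-path stub.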
        __ br_null(O0, false, Assembler::pt, *stub->continuation());
        __ delayed()->xor3(O0, -1, tmp);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
        }
#endif

        __ sub(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ br(Assembler::always, false, Assembler::pt, *stub->entry());
        __ delayed()->add(dst_pos, tmp, dst_pos);

        __ bind(cont);
      } else {
        __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
        __ delayed()->nop();
        __ bind(cont);
      }
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    metadata2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      // tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
      __ encode_klass_not_null(tmp);
      // load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pn, halt);
        // load the raw value of the src klass.
        __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ br(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    } else {
      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pn, halt);
        __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    __ inc_counter(counter, G1, G3);
  }
#endif

  Register src_ptr = O0;
  Register dst_ptr = O1;
  Register len     = O2;

  __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
  } else {
    __ sll(src_pos, shift, tmp);
    __ add(src_ptr, tmp, src_ptr);
  }

  __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
  if (shift == 0) {
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sll(dst_pos, shift, tmp);
    __ add(dst_ptr, tmp, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // arraycopy stubs take a length in number of elements, so don't scale it.
  __ mov(length, len);
  __ call_VM_leaf(tmp, entry);

  __ bind(*stub->continuation());
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sllx(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ srax(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
      switch (code) {
        case lir_shl:  __ sll(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ sra(left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_ushr: __ srl(left->as_register(), count->as_register(), dest->as_register()); break;
        default: ShouldNotReachHere();
      }
  } else {
    switch (code) {
      case lir_shl:  __ sllx(left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_shr:  __ srax(left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_ushr: __ srlx(left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      default: ShouldNotReachHere();
    }
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (left->type() == T_OBJECT) {
    count = count & 63;  // shouldn't shift by more than the width of intptr_t (64 bits)
    Register l = left->as_register();
    Register d = dest->as_register_lo();
    switch (code) {
      case lir_shl:  __ sllx(l, count, d); break;
      case lir_shr:  __ srax(l, count, d); break;
      case lir_ushr: __ srlx(l, count, d); break;
      default: ShouldNotReachHere();
    }
    return;
  }

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
    switch (code) {
      case lir_shl:  __ sll(left->as_register(), count, dest->as_register()); break;
      case lir_shr:  __ sra(left->as_register(), count, dest->as_register()); break;
      case lir_ushr: __ srl(left->as_register(), count, dest->as_register()); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    switch (code) {
      case lir_shl:  __ sllx(left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_shr:  __ srax(left->as_pointer_register(), count, dest->as_pointer_register()); break;
      case lir_ushr: __ srlx(left->as_pointer_register(), count, dest->as_pointer_register()); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->obj()->as_register()  == O0 &&
         op->klass()->as_register() == G5, "must be");
  if (op->init_check()) {
    add_debug_info_for_null_check_here(op->stub()->info());
    __ ldub(op->klass()->as_register(),
            in_bytes(InstanceKlass::init_state_offset()),
            op->tmp1()->as_register());
    __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
    __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
    __ delayed()->nop();
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->tmp4()->as_register() == O1 &&
         op->klass()->as_register() == G5, "must be");

  __ signx(op->len()->as_register());
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
    __ delayed()->nop();
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                               mdo_offset_bias);
    __ ld_ptr(receiver_addr, tmp1);
    __ verify_klass_ptr(tmp1);
    __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                           mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                           mdo_offset_bias);
    __ ld_ptr(recv_addr, tmp1);
    __ br_notnull_short(tmp1, Assembler::pt, next_test);
    __ st_ptr(recv, recv_addr);
    __ set(DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                         mdo_offset_bias);
    __ ba(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
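    // (simm13 immediates cover -4096..4095, so any slot offset beyond that
    // range must be folded into the base register via this bias.)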
  }
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  ciKlass* k = op->klass();


  if (obj == k_RInfo) {
    k_RInfo = klass_RInfo;
    klass_RInfo = obj;
  }

  ciMethodData* md;
  ciProfileData* data;
  int mdo_offset_bias = 0;
  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Label not_null;
    __ br_notnull_short(obj, Assembler::pn, not_null);
    Register mdo      = k_RInfo;
    Register data_val = Rtmp1;
    metadata2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, data_val);
      __ add(mdo, data_val, mdo);
    }
    Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
    __ ldub(flags_addr, data_val);
    __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
    __ stb(data_val, flags_addr);
    __ ba(*obj_is_null);
    __ delayed()->nop();
    __ bind(not_null);
  } else {
    __ br_null(obj, false, Assembler::pn, *obj_is_null);
    __ delayed()->nop();
  }

  Label profile_cast_failure, profile_cast_success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;

  // patching may screw with our temporaries on sparc,
  // so let's do it before loading the class
  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }
  assert(obj != k_RInfo, "must be different");

  // get object class
  // not a safepoint as obj null check happens earlier
  __ load_klass(obj, klass_RInfo);
  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmp(k_RInfo, klass_RInfo);
    __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
    __ delayed()->nop();
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
        need_slow_path = false;
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
                                       (need_slow_path ? success_target : NULL),
                                       failure_target, NULL,
                                       RegisterOrConstant(k->super_check_offset()));
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
                                       failure_target, NULL);
    }
    if (need_slow_path) {
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();
      __ cmp(G3, 0);
      __ br(Assembler::equal, false, Assembler::pn, *failure_target);
      __ delayed()->nop();
      // Fall through to success case
    }
  }

  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
    assert_different_registers(obj, mdo, recv, tmp1);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    __ load_klass(obj, recv);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
    // Jump over the failure case
    __ ba(*success);
    __ delayed()->nop();
    // Cast failure case
    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    if (mdo_offset_bias > 0) {
      __ set(mdo_offset_bias, tmp1);
      __ add(mdo, tmp1, mdo);
    }
    Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ sub(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(*failure);
    __ delayed()->nop();
  }
  __ ba(*success);
  __ delayed()->nop();
}

void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    __ verify_oop(value);
    CodeStub* stub = op->stub();
    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;
    int mdo_offset_bias = 0;
    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      Label not_null;
      __ br_notnull_short(value, Assembler::pn, not_null);
      Register mdo      = k_RInfo;
      Register data_val = Rtmp1;
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, data_val);
        __ add(mdo, data_val, mdo);
      }
      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
      __ ldub(flags_addr, data_val);
      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
      __ stb(data_val, flags_addr);
      __ ba_short(done);
      __ bind(not_null);
    } else {
      __ br_null_short(value, Assembler::pn, done);
    }
    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(array, k_RInfo);
    __ load_klass(value, klass_RInfo);

    // get instance klass
    __ ld_ptr(Address(k_RInfo, ObjArrayKlass::element_klass_offset()), k_RInfo);
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ cmp(G3, 0);
    __ br(Assembler::equal, false, Assembler::pn, *failure_target);
    __ delayed()->nop();
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      __ load_klass(value, recv);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ ba_short(done);
      // Cast failure case
      __ bind(profile_cast_failure);
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld_ptr(data_addr, tmp1);
      __ sub(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, data_addr);
      __ ba(*stub->entry());
      __ delayed()->nop();
    }
    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    __ mov(obj, dst);
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ set(0, dst);
    __ ba_short(done);
    __ bind(success);
    __ set(1, dst);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
    __ mov(cmp_value_lo, t1);
    __ mov(new_value_lo, t2);
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    __ cmp(t1, t2);
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value = op->cmp_value()->as_register();
    Register new_value = op->new_value()->as_register();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
    __ mov(cmp_value, t1);
    __ mov(new_value, t2);
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        __ encode_heap_oop(t1);
        __ encode_heap_oop(t2);
        __ cas(addr, t1, t2);
      } else {
        __ cas_ptr(addr, t1, t2);
      }
    } else {
      __ cas(addr, t1, t2);
    }
    __ cmp(t1, t2);
  } else {
    Unimplemented();
  }
}

void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}


void LIR_Assembler::reset_FPU() {
  Unimplemented();
}


void LIR_Assembler::breakpoint() {
  __ breakpoint_trap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, dst);
  } else {
    __ set(offset, dst);
    __ add(dst, reg, dst);
  }
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register table = op->result_opr()->as_register();
  Register res   = op->result_opr()->as_register();

  assert_different_registers(val, crc, table);
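  // Note: table and res alias the same result register; the table pointer is
  // only needed by update_byte_crc32 and is dead before the final move below.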

  __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
  __ not1(crc);
  __ clruwu(crc);
  __ update_byte_crc32(crc, val, table);
  __ not1(crc);

  __ mov(crc, res);
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();

  // obj may not be an oop
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // add debug info for NullPointerException only if one is possible
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // always do slow locking
      // note: the slow locking code could be inlined here, however if we use
      //       slow locking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow locking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  } else {
    assert(op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // always do slow unlocking
      // note: the slow unlocking code could be inlined here, however if we use
      //       slow unlocking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow unlocking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci          = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ set(mdo_offset_bias, O7);
    __ add(mdo, O7, mdo);
  }

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                         VirtualCallData::receiver_count_offset(i)) -
                                 mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                                 mdo_offset_bias);
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ st_ptr(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                                 mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld_ptr(counter_addr, tmp1);
      __ add(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, counter_addr);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ld_ptr(counter_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, counter_addr);
  }
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp1 = op->tmp()->as_pointer_register();
  Register tmp2 = G1;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp1 != obj) {
    __ mov(obj, tmp1);
  }
  if (do_null) {
    __ br_notnull_short(tmp1, Assembler::pt, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ ld_ptr(mdo_addr, tmp1);
      __ or3(tmp1, TypeEntries::null_seen, tmp1);
      __ st_ptr(tmp1, mdo_addr);
    }
    if (do_update) {
      __ ba(next);
      __ delayed()->nop();
    }
#ifdef ASSERT
  } else {
    __ br_notnull_short(tmp1, Assembler::pt, update);
2897 __ stop("unexpect null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp1, tmp1);
      metadata2reg(exact_klass->constant_encoding(), tmp2);
      __ cmp_and_br_short(tmp1, tmp2, Assembler::equal, Assembler::pt, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif

    Label do_update;
    __ ld_ptr(mdo_addr, tmp2);

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          metadata2reg(exact_klass->constant_encoding(), tmp1);
        } else {
          __ load_klass(tmp1, tmp1);
        }

        __ xor3(tmp1, tmp2, tmp1);
        __ btst(TypeEntries::type_klass_mask, tmp1);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ brx(Assembler::zero, false, Assembler::pt, next);
        __ delayed()->btst(TypeEntries::type_unknown, tmp1);
        // already unknown. Nothing to do anymore.
        __ brx(Assembler::notZero, false, Assembler::pt, next);

        if (TypeEntries::is_type_none(current_klass)) {
          __ delayed()->btst(TypeEntries::type_mask, tmp2);
          __ brx(Assembler::zero, true, Assembler::pt, do_update);
          // first time here. Set profile type.
          __ delayed()->or3(tmp2, tmp1, tmp2);
        } else {
          __ delayed()->nop();
        }
2945 } else {
2946 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2947 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
2948
2949 __ btst(TypeEntries::type_unknown, tmp2);
2950 // already unknown. Nothing to do anymore.
2951 __ brx(Assembler::notZero, false, Assembler::pt, next);
2952 __ delayed()->nop();
2953 }
2954
2955 // different than before. Cannot keep accurate profile.
2956 __ or3(tmp2, TypeEntries::type_unknown, tmp2);
2957 } else {
2958 // There's a single possible klass at this profile point
2959 assert(exact_klass != NULL, "should be");
2960 if (TypeEntries::is_type_none(current_klass)) {
2961 metadata2reg(exact_klass->constant_encoding(), tmp1);
2962 __ xor3(tmp1, tmp2, tmp1);
2963 __ btst(TypeEntries::type_klass_mask, tmp1);
2964 __ brx(Assembler::zero, false, Assembler::pt, next);
2965 #ifdef ASSERT
2966
2967 {
2968 Label ok;
2969 __ delayed()->btst(TypeEntries::type_mask, tmp2);
2970 __ brx(Assembler::zero, true, Assembler::pt, ok);
2971 __ delayed()->nop();
2972
2973 __ stop("unexpected profiling mismatch");
2974 __ bind(ok);
2975 }
2976 // first time here. Set profile type.
2977 __ or3(tmp2, tmp1, tmp2);
2978 #else
2979 // first time here. Set profile type.
2980 __ delayed()->or3(tmp2, tmp1, tmp2);
2981 #endif
2982
2983 } else {
2984 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2985 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2986
2987 // already unknown. Nothing to do anymore.
2988 __ btst(TypeEntries::type_unknown, tmp2);
2989 __ brx(Assembler::notZero, false, Assembler::pt, next);
2990 __ delayed()->or3(tmp2, TypeEntries::type_unknown, tmp2);
2991 }
2992 }
2993
2994 __ bind(do_update);
2995 __ st_ptr(tmp2, mdo_addr);
2996
2997 __ bind(next);
2998 }
2999 }
3000
align_backward_branch_target()3001 void LIR_Assembler::align_backward_branch_target() {
3002 __ align(OptoLoopAlignment);
3003 }
3004
3005
emit_delay(LIR_OpDelay * op)3006 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
3007 // make sure we are expecting a delay
3008 // this has the side effect of clearing the delay state
3009 // so we can use _masm instead of _masm->delayed() to do the
3010 // code generation.
3011 __ delayed();
3012
3013 // make sure we only emit one instruction
3014 int offset = code_offset();
3015 op->delay_op()->emit_code(this);
3016 #ifdef ASSERT
3017 if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
3018 op->delay_op()->print();
3019 }
3020 assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
3021 "only one instruction can go in a delay slot");
3022 #endif
3023
3024 // we may also be emitting the call info for the instruction
3025 // which we are the delay slot of.
3026 CodeEmitInfo* call_info = op->call_info();
3027 if (call_info) {
3028 add_call_info(code_offset(), call_info);
3029 }
3030
3031 if (VerifyStackAtCalls) {
3032 _masm->sub(FP, SP, O7);
3033 _masm->cmp(O7, initial_frame_size_in_bytes());
3034 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
3035 }
3036 }
3037
3038
negate(LIR_Opr left,LIR_Opr dest,LIR_Opr tmp)3039 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3040 // tmp must be unused
3041 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
3042 assert(left->is_register(), "can only handle registers");
3043
3044 if (left->is_single_cpu()) {
3045 __ neg(left->as_register(), dest->as_register());
3046 } else if (left->is_single_fpu()) {
3047 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
3048 } else if (left->is_double_fpu()) {
3049 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
3050 } else {
3051 assert (left->is_double_cpu(), "Must be a long");
3052 Register Rlow = left->as_register_lo();
3053 Register Rhi = left->as_register_hi();
3054 __ sub(G0, Rlow, dest->as_register_lo());
3055 }
3056 }
3057
3058
fxch(int i)3059 void LIR_Assembler::fxch(int i) {
3060 Unimplemented();
3061 }
3062
fld(int i)3063 void LIR_Assembler::fld(int i) {
3064 Unimplemented();
3065 }
3066
ffree(int i)3067 void LIR_Assembler::ffree(int i) {
3068 Unimplemented();
3069 }
3070
void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {

  // if tmp is invalid, then the function being called doesn't destroy the thread
  if (tmp->is_valid()) {
    __ save_thread(tmp->as_pointer_register());
  }
  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->nop();
  if (info != NULL) {
    add_call_info_here(info);
  }
  if (tmp->is_valid()) {
    __ restore_thread(tmp->as_pointer_register());
  }

#ifdef ASSERT
  __ verify_thread();
#endif // ASSERT
}


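// Volatile moves. The ShouldNotReachHere() below makes the rest of this
// method dead: on 64-bit SPARC volatile accesses go through the normal
// move path. The long-splitting sequence appears to be retained from the
// 32-bit port (see NEEDS_CLEANUP).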
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotReachHere();

  NEEDS_CLEANUP;
  if (type == T_LONG) {
    LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();

    // (extended to allow indexed as well as constant displaced for JSR-166)
    Register idx = noreg; // contains either constant offset or index

    int disp = mem_addr->disp();
    if (mem_addr->index() == LIR_OprFact::illegalOpr) {
      if (!Assembler::is_simm13(disp)) {
        idx = O7;
        __ set(disp, idx);
      }
    } else {
      assert(disp == 0, "not both indexed and disp");
      idx = mem_addr->index()->as_register();
    }

    int null_check_offset = -1;

    Register base = mem_addr->base()->as_register();
    if (src->is_register() && dest->is_address()) {
      // G4 is high half, G5 is low half
      // clear the top bits of G5, and scale up G4
      __ srl (src->as_register_lo(), 0, G5);
      __ sllx(src->as_register_hi(), 32, G4);
      // combine the two halves into the 64 bits of G4
      __ or3(G4, G5, G4);
      null_check_offset = __ offset();
      if (idx == noreg) {
        __ stx(G4, base, disp);
      } else {
        __ stx(G4, base, idx);
      }
    } else if (src->is_address() && dest->is_register()) {
      null_check_offset = __ offset();
      if (idx == noreg) {
        __ ldx(base, disp, G5);
      } else {
        __ ldx(base, idx, G5);
      }
      __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
      __ mov (G5, dest->as_register_lo());     // copy low half into lo
    } else {
      Unimplemented();
    }
    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }

  } else {
    // use normal move for all other volatiles since they don't need
    // special handling to remain atomic.
    move_op(src, dest, type, lir_patch_none, info, false, false, false);
  }
}

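// Memory barriers. SPARC runs in TSO (total store order) mode, where the
// hardware only reorders stores past later loads, so StoreLoad is the only
// barrier that requires an actual membar instruction.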
void LIR_Assembler::membar() {
  // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // no-op on TSO
}

void LIR_Assembler::membar_release() {
  // no-op on TSO
}

void LIR_Assembler::membar_loadload() {
  // no-op on TSO
  //__ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad));
}

void LIR_Assembler::membar_storestore() {
  // no-op on TSO
  //__ membar(Assembler::Membar_mask_bits(Assembler::StoreStore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op on TSO
  //__ membar(Assembler::Membar_mask_bits(Assembler::LoadStore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

// Pack two sequential registers containing 32 bit values
// into a single 64 bit register.
// src and src->successor() are packed into dst
// src and dst may be the same register.
// Note: src is destroyed
void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register();
  Register rd = dst->as_register_lo();
  __ sllx(rs, 32, rs);
  __ srl(rs->successor(), 0, rs->successor());
  __ or3(rs, rs->successor(), rd);
}

// Unpack a 64 bit value in a register into
// two sequential registers.
// src is unpacked into dst and dst->successor()
void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
  Register rs = src->as_register_lo();
  Register rd = dst->as_register_hi();
  assert_different_registers(rs, rd, rd->successor());
  __ srlx(rs, 32, rd);
  __ srl (rs, 0, rd->successor());
}

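// Load effective address: dest = base + index + disp. When patching is
// requested, the displacement is materialized with a patchable set so the
// constant can be fixed up once the field offset is resolved.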
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  const LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet");
  const Register dest_reg = dest->as_pointer_register();
  const Register base_reg = addr->base()->as_pointer_register();

  if (patch_code != lir_patch_none) {
    PatchingStub* patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(addr->disp() != 0, "must have");
    assert(base_reg != G3_scratch, "invariant");
    __ patchable_set(0, G3_scratch);
    patching_epilog(patch, patch_code, base_reg, info);
    assert(dest_reg != G3_scratch, "invariant");
    if (addr->index()->is_valid()) {
      const Register index_reg = addr->index()->as_pointer_register();
      assert(index_reg != G3_scratch, "invariant");
      __ add(index_reg, G3_scratch, G3_scratch);
    }
    __ add(base_reg, G3_scratch, dest_reg);
  } else {
    if (Assembler::is_simm13(addr->disp())) {
      if (addr->index()->is_valid()) {
        const Register index_reg = addr->index()->as_pointer_register();
        assert(index_reg != G3_scratch, "invariant");
        __ add(base_reg, addr->disp(), G3_scratch);
        __ add(index_reg, G3_scratch, dest_reg);
      } else {
        __ add(base_reg, addr->disp(), dest_reg);
      }
    } else {
      __ set(addr->disp(), G3_scratch);
      if (addr->index()->is_valid()) {
        const Register index_reg = addr->index()->as_pointer_register();
        assert(index_reg != G3_scratch, "invariant");
        __ add(index_reg, G3_scratch, G3_scratch);
      }
      __ add(base_reg, G3_scratch, dest_reg);
    }
  }
}


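// The current JavaThread is permanently cached in G2 on SPARC.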
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(G2_thread, result_reg->as_register());
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    }
    __ br(acond, false, Assembler::pt, ok);
    __ delayed()->nop();
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

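// Peephole pass over the LIR: try to move a preceding single-instruction
// op into the delay slot of a branch or call; if nothing can be moved,
// insert an explicit delayed nop.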
void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cond_float_branch:
      case lir_branch: {
        LIR_OpBranch* branch = op->as_OpBranch();
        assert(branch->info() == NULL, "shouldn't be state on branches anymore");
        LIR_Op* delay_op = NULL;
        // we'd like to be able to pull following instructions into
        // this slot but we don't know enough to do it safely yet so
        // only optimize block to block control flow.
        if (LIRFillDelaySlots && branch->block()) {
          LIR_Op* prev = inst->at(i - 1);
          if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
            // swap previous instruction into delay slot
            inst->at_put(i - 1, op);
            inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
            if (LIRTracePeephole) {
              tty->print_cr("delayed");
              inst->at(i - 1)->print();
              inst->at(i)->print();
              tty->cr();
            }
#endif
            continue;
          }
        }

        if (!delay_op) {
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
        }
        inst->insert_before(i + 1, delay_op);
        break;
      }
      case lir_static_call:
      case lir_virtual_call:
      case lir_icvirtual_call:
      case lir_optvirtual_call:
      case lir_dynamic_call: {
        LIR_Op* prev = inst->at(i - 1);
        if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
            (op->code() != lir_virtual_call ||
             !prev->result_opr()->is_single_cpu() ||
             prev->result_opr()->as_register() != O0) &&
            LIR_Assembler::is_single_instruction(prev)) {
          // Only moves without info can be put into the delay slot.
          // Also don't allow the setup of the receiver in the delay
          // slot for vtable calls.
          inst->at_put(i - 1, op);
          inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
          if (LIRTracePeephole) {
            tty->print_cr("delayed");
            inst->at(i - 1)->print();
            inst->at(i)->print();
            tty->cr();
          }
#endif
        } else {
          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
          inst->insert_before(i + 1, delay_op);
          i++;
        }
        break;
      }
      default:
        break;
    }
  }
}

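// Atomic exchange. Integers use the 32-bit swap instruction directly;
// oops are swapped in compressed form through tmp, since swap operates
// on 32-bit words only (hence the UseCompressedOops assert below).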
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  LIR_Address* addr = src->as_address_ptr();

  assert(data == dest, "swap uses only 2 operands");
  assert(code == lir_xchg, "no xadd on sparc");

  if (data->type() == T_INT) {
    __ swap(as_Address(addr), data->as_register());
  } else if (data->is_oop()) {
    Register obj = data->as_register();
    Register narrow = tmp->as_register();
    assert(UseCompressedOops, "swap is 32bit only");
    __ encode_heap_oop(obj, narrow);
    __ swap(as_Address(addr), narrow);
    __ decode_heap_oop(narrow, obj);
  } else {
    ShouldNotReachHere();
  }
}

#undef __