/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"



#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rscratch2;   // where the IC klass is cached
const Register SYNC_header = r0;          // synchronization header
const Register SHIFT_count = r0;          // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}
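
// Usage sketch (illustrative, not part of the original source): the helpers
// above remap a caller-supplied temp onto 'extra' whenever it aliases the
// register that must stay live.  A hypothetical call such as
//
//   Register tmp1 = obj, tmp2 = r8;              // tmp1 aliases obj
//   select_different_registers(obj, r9, tmp1, tmp2);
//
// leaves tmp1 == r9, so later writes through tmp1 cannot clobber obj.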


bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

address LIR_Assembler::int_constant(jlong n) {
  address const_addr = __ long_constant(n);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }

void LIR_Assembler::reset_FPU() { Unimplemented(); }

void LIR_Assembler::fpop() { Unimplemented(); }

void LIR_Assembler::fxch(int i) { Unimplemented(); }

void LIR_Assembler::fld(int i) { Unimplemented(); }

void LIR_Assembler::ffree(int i) { Unimplemented(); }

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
//-------------------------------------------

static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

static jlong as_long(LIR_Opr data) {
  jlong result;
  switch (data->type()) {
  case T_INT:
    result = (data->as_jint());
    break;
  case T_LONG:
    result = (data->as_jlong());
    break;
  default:
    ShouldNotReachHere();
    result = 0;  // unreachable
  }
  return result;
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch (opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
    }
  } else {
    intptr_t addr_offset = intptr_t(addr->disp());
    if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
      return Address(base, addr_offset, Address::lsl(addr->scale()));
    else {
      __ mov(tmp, addr_offset);
      return Address(base, tmp, Address::lsl(addr->scale()));
    }
  }
  return Address();
}
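
// Illustrative note (not from the original source): as_Address selects
// between the two AArch64 addressing forms.  A register index becomes
// [base, index, sxtw/lsl #scale]; an immediate displacement is used
// directly when it fits the scaled-offset encoding, and otherwise is first
// materialized into 'tmp', roughly:
//
//   __ mov(tmp, disp);                         // disp too large to encode
//   return Address(base, tmp, Address::lsl(addr->scale()));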

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers set up:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the locals array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth
  // lock from the interpreter frame, and the nth lock slot in the osr buffer
  // is the 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ ldr(r19, Address(OSR_buf, slot_offset + 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(r19, frame_map()->address_for_monitor_object(i));
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o, /*immediate*/true);
  }
}

void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
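
// Worked example (illustrative, not from the original source): with the
// AArch64 values VMRegImpl::slots_per_word == 2 and
// VMRegImpl::stack_slot_size == 4, a framesize of 12 slots yields
// (12 - 2*2) * 4 = 32 bytes, i.e. the full frame minus the two words
// (return address and frame link) that build_frame stores itself.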


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0, and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r19, r0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r0_opr);
    stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
    __ unlock_object(r5, r4, r0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ call_Unimplemented();
#if 0
    // disabled x86 code for the dtrace method-exit probe:
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
#endif
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ adr(lr, pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
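
// Note (illustrative, not from the original source): adr(lr, pc()) makes lr
// point into the deopt handler itself, so when the deopt blob's unpack
// entry inspects its "return address" it finds a pc inside this nmethod,
// which is how the runtime identifies the method being deoptimized.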

void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  address polling_page(os::get_polling_page());
  __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
  __ ret(lr);
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  address polling_page(os::get_polling_page());
  guarantee(info != NULL, "Shouldn't be NULL");
  assert(os::is_poll_address(polling_page), "should be");
  __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
      break;
    }

    case T_DOUBLE: {
      if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
        __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
      } else {
        __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
        __ ldrd(dest->as_double_reg(), Address(rscratch1));
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
  case T_OBJECT:
    {
      if (! c->as_jobject())
        __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
        reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
      }
    }
    break;
  case T_ADDRESS:
    {
      const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
      reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
    }
    break;
  case T_INT:
  case T_FLOAT:
    {
      Register reg = zr;
      if (c->as_jint_bits() == 0)
        __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
        __ movw(rscratch1, c->as_jint_bits());
        __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
      }
    }
    break;
  case T_LONG:
  case T_DOUBLE:
    {
      Register reg = zr;
      if (c->as_jlong_bits() == 0)
        __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                 lo_word_offset_in_bytes));
      else {
        __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
        __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                        lo_word_offset_in_bytes));
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == 0, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str;  // unreachable
  }

  if (info) add_debug_info_for_null_check_here(info);
  (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}
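
// Note (illustrative, not from the original source): const2mem only ever
// stores the constant zero, so the switch above just selects a store of
// the right width into the pointer-to-member 'insn' and the final line
// emits it with zr as the source register; for T_SHORT, for example, the
// call expands to the equivalent of
//   __ strh(zr, as_Address(to_addr, rscratch1));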

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising to me but we can see move of a long to T_OBJECT
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

  } else if (dest->is_single_fpu()) {
    __ fmovs(dest->as_float_reg(), src->as_float_reg());

  } else if (dest->is_double_fpu()) {
    __ fmovd(dest->as_double_reg(), src->as_double_reg());

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  if (src->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
      __ verify_oop(src->as_register());
    } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
      __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
      __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
    }

  } else if (src->is_double_cpu()) {
    Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    __ str(src->as_register_lo(), dest_addr_LO);

  } else if (src->is_single_fpu()) {
    Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ strs(src->as_float_reg(), dest_addr);

  } else if (src->is_double_fpu()) {
    Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ strd(src->as_double_reg(), dest_addr);

  } else {
    ShouldNotReachHere();
  }

}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());

    if (UseCompressedOops && !wide) {
      __ encode_heap_oop(compressed_src, src->as_register());
    } else {
      compressed_src = src->as_register();
    }
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ strs(src->as_float_reg(), as_Address(to_addr));
      break;
    }

    case T_DOUBLE: {
      __ strd(src->as_double_reg(), as_Address(to_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ strw(compressed_src, as_Address(to_addr, rscratch2));
      } else {
        __ str(compressed_src, as_Address(to_addr));
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_ADDRESS:
      __ str(src->as_register(), as_Address(to_addr));
      break;
    case T_INT:
      __ strw(src->as_register(), as_Address(to_addr));
      break;

    case T_LONG: {
      __ str(src->as_register_lo(), as_Address_lo(to_addr));
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      __ strb(src->as_register(), as_Address(to_addr));
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    __ ldr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ ldrs(dest->as_float_reg(), src_addr);

  } else if (dest->is_double_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ ldrd(dest->as_double_reg(), src_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      __ ldrs(dest->as_float_reg(), as_Address(from_addr));
      break;
    }

    case T_DOUBLE: {
      __ ldrd(dest->as_double_reg(), as_Address(from_addr));
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
        __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_METADATA:
      // We only get here to load a method pointer from the stack for
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      ShouldNotReachHere();
      __ ldr(dest->as_register(), as_Address(from_addr));
      break;
    case T_ADDRESS:
      // FIXME: OMG this is a horrible kludge.  Any offset from an
      // address that matches klass_offset_in_bytes() will be loaded
      // as a word, not a long.
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
        __ ldr(dest->as_register(), as_Address(from_addr));
      }
      break;
    case T_INT:
      __ ldrw(dest->as_register(), as_Address(from_addr));
      break;

    case T_LONG: {
      __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
      break;
    }

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(from_addr));
      break;
    case T_BOOLEAN: {
      __ ldrb(dest->as_register(), as_Address(from_addr));
      break;
    }

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(from_addr));
      break;
    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(from_addr));
      break;

    default:
      ShouldNotReachHere();
  }

  if (type == T_ARRAY || type == T_OBJECT) {
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }

    if (!UseZGC) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
  }
}


int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}
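
// Worked example (illustrative, not from the original source): for T_INT,
// type2aelembytes() returns 4, so array_element_size() returns
// exact_log2(4) == 2, the shift amount used to turn an array index into a
// byte offset (offset = index << 2).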


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
  case lir_irem:
    arithmetic_idiv(op->code(),
                    op->in_opr1(),
                    op->in_opr2(),
                    op->in_opr3(),
                    op->result_opr(),
                    op->info());
    break;
  case lir_fmad:
    __ fmaddd(op->result_opr()->as_double_reg(),
              op->in_opr1()->as_double_reg(),
              op->in_opr2()->as_double_reg(),
              op->in_opr3()->as_double_reg());
    break;
  case lir_fmaf:
    __ fmadds(op->result_opr()->as_float_reg(),
              op->in_opr1()->as_float_reg(),
              op->in_opr2()->as_float_reg(),
              op->in_opr3()->as_float_reg());
    break;
  default: ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here.  Likewise, Assembler::NE does not permit
      // ordered branches.
      if ((is_unordered && op->cond() == lir_cond_equal)
          || (!is_unordered && op->cond() == lir_cond_notEqual))
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = Assembler::LT; break;
      case lir_cond_lessEqual:    acond = Assembler::LE; break;
      case lir_cond_greaterEqual: acond = Assembler::GE; break;
      case lir_cond_greater:      acond = Assembler::GT; break;
      case lir_cond_belowEqual:   acond = Assembler::LS; break;
      case lir_cond_aboveEqual:   acond = Assembler::HS; break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond, *(op->label()));
  }
}
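
// Note (illustrative, not from the original source): on AArch64 an fcmp
// with a NaN operand sets the V flag, so the extra br(VS, ...) above sends
// the unordered case to the right block when EQ/NE alone cannot express it.
// For the remaining conditions the table picks a condition code whose
// unordered behaviour matches: LT, for instance, is taken on unordered
// while LO is not, hence (is_unordered ? LT : LO) for lir_cond_less.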



void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2f:
      {
        __ scvtfws(dest->as_float_reg(), src->as_register());
        break;
      }
    case Bytecodes::_i2d:
      {
        __ scvtfwd(dest->as_double_reg(), src->as_register());
        break;
      }
    case Bytecodes::_l2d:
      {
        __ scvtfd(dest->as_double_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_l2f:
      {
        __ scvtfs(dest->as_float_reg(), src->as_register_lo());
        break;
      }
    case Bytecodes::_f2d:
      {
        __ fcvts(dest->as_double_reg(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2f:
      {
        __ fcvtd(dest->as_float_reg(), src->as_double_reg());
        break;
      }
    case Bytecodes::_i2c:
      {
        __ ubfx(dest->as_register(), src->as_register(), 0, 16);
        break;
      }
    case Bytecodes::_i2l:
      {
        __ sxtw(dest->as_register_lo(), src->as_register());
        break;
      }
    case Bytecodes::_i2s:
      {
        __ sxth(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_i2b:
      {
        __ sxtb(dest->as_register(), src->as_register());
        break;
      }
    case Bytecodes::_l2i:
      {
        _masm->block_comment("FIXME: This could be a no-op");
        __ uxtw(dest->as_register(), src->as_register_lo());
        break;
      }
    case Bytecodes::_d2l:
      {
        __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
        break;
      }
    case Bytecodes::_f2i:
      {
        __ fcvtzsw(dest->as_register(), src->as_float_reg());
        break;
      }
    case Bytecodes::_f2l:
      {
        __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
        break;
      }
    case Bytecodes::_d2i:
      {
        __ fcvtzdw(dest->as_register(), src->as_double_reg());
        break;
      }
    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}
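
// Note (illustrative, not from the original source): the two loops above
// are the usual receiver-type profile update.  The first pass bumps the
// counter of an existing row whose recorded klass equals 'recv'; the
// second pass claims the first empty row, storing 'recv' with an initial
// count of DataLayout::counter_increment.  If every row is occupied,
// control simply falls out of the helper and the new receiver goes
// unrecorded.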

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  const bool should_profile = op->should_profile();

  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = should_profile ? &profile_cast_success : success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (should_profile) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                        0);
    __ ldrb(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
    __ strb(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp(rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

        __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
        // result is a boolean
        __ cbzw(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (should_profile) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, CounterData::count_offset()),
                        0);
    __ ldr(rscratch1, counter_addr);
    __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
    __ str(rscratch1, counter_addr);
    __ b(*failure);
  }
  __ b(*success);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  const bool should_profile = op->should_profile();

  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL,                "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = should_profile ? &profile_cast_success : &done;
    Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();

    if (should_profile) {
      Label not_null;
      __ cbnz(value, not_null);
      // Object is null; update MDO and exit
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                          0);
      __ ldrb(rscratch1, data_addr);
      __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
      __ strb(rscratch1, data_addr);
      __ b(done);
      __ bind(not_null);
    } else {
      __ cbz(value, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
    // result is a boolean
    __ cbzw(k_RInfo, *failure_target);
    // fall through to the success case

    if (should_profile) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &done);
      __ b(done);

      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ lea(rscratch2, counter_addr);
      __ ldr(rscratch1, Address(rscratch2));
      __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
      __ str(rscratch1, Address(rscratch2));
      __ b(*stub->entry());
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
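
// Note (illustrative, not from the original source): both CAS helpers emit
// a strong compare-and-swap with acquire and release semantics.  cmpxchg
// leaves the outcome in the flags, so cset(rscratch1, NE) writes 1 to
// rscratch1 exactly when the CAS failed, and the trailing membar(AnyAny)
// adds a conservative full fence for the ordering guarantees that
// compareAndSet-style operations expect.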


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  assert(VM_Version::supports_cx8(), "wrong machine");
  Register addr;
  if (op->addr()->is_register()) {
    addr = as_reg(op->addr());
  } else {
    assert(op->addr()->is_address(), "what else?");
    LIR_Address* addr_ptr = op->addr()->as_address_ptr();
    assert(addr_ptr->disp() == 0, "need 0 disp");
    assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index");
    addr = as_reg(addr_ptr->base());
  }
  Register newval = as_reg(op->new_value());
  Register cmpval = as_reg(op->cmp_value());

  if (op->code() == lir_cas_obj) {
    if (UseCompressedOops) {
      Register t1 = op->tmp1()->as_register();
      assert(op->tmp1()->is_valid(), "must be");
      __ encode_heap_oop(t1, cmpval);
      cmpval = t1;
      __ encode_heap_oop(rscratch2, newval);
      newval = rscratch2;
      casw(addr, newval, cmpval);
    } else {
      casl(addr, newval, cmpval);
    }
  } else if (op->code() == lir_cas_int) {
    casw(addr, newval, cmpval);
  } else {
    casl(addr, newval, cmpval);
  }
}

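
// Note (illustrative, not from the original source): cmove below is emitted
// branch-free.  When the two sources are the constants 0 and 1, a single
// cset on the appropriate condition suffices; otherwise both operands are
// forced into registers (through rscratch1/rscratch2 for stack slots and
// constants) and one csel picks between them using the already-computed
// flags.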
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {

  Assembler::Condition acond, ncond;
  switch (condition) {
  case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
  case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
  case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
  case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
  case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
  case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
  case lir_cond_belowEqual:
  case lir_cond_aboveEqual:
  default:                    ShouldNotReachHere();
    acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
  }

  assert(result->is_single_cpu() || result->is_double_cpu(),
         "expect single register for result");
  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_INT && opr2->type() == T_INT) {
    jint val1 = opr1->as_jint();
    jint val2 = opr2->as_jint();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register(), acond);
      return;
    }
  }

  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_LONG && opr2->type() == T_LONG) {
    jlong val1 = opr1->as_jlong();
    jlong val2 = opr2->as_jlong();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register_lo(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register_lo(), acond);
      return;
    }
  }

  if (opr1->is_stack()) {
    stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
    opr1 = FrameMap::rscratch1_opr;
  } else if (opr1->is_constant()) {
    LIR_Opr tmp
      = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
    const2reg(opr1, tmp, lir_patch_none, NULL);
    opr1 = tmp;
  }

  if (opr2->is_stack()) {
    stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
    opr2 = FrameMap::rscratch2_opr;
  } else if (opr2->is_constant()) {
    LIR_Opr tmp
      = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
    const2reg(opr2, tmp, lir_patch_none, NULL);
    opr2 = tmp;
  }

  if (result->type() == T_LONG)
    __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
  else
    __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
}
1650
arith_op(LIR_Code code,LIR_Opr left,LIR_Opr right,LIR_Opr dest,CodeEmitInfo * info,bool pop_fpu_stack)1651 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1652 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
1653
1654 if (left->is_single_cpu()) {
1655 Register lreg = left->as_register();
1656 Register dreg = as_reg(dest);
1657
1658 if (right->is_single_cpu()) {
1659 // cpu register - cpu register
1660
1661 assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
1662 "should be");
1663 Register rreg = right->as_register();
1664 switch (code) {
1665 case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
1666 case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
1667 case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
1668 default: ShouldNotReachHere();
1669 }
1670
1671 } else if (right->is_double_cpu()) {
1672 Register rreg = right->as_register_lo();
1673 // single_cpu + double_cpu: can happen with obj+long
1674 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1675 switch (code) {
1676 case lir_add: __ add(dreg, lreg, rreg); break;
1677 case lir_sub: __ sub(dreg, lreg, rreg); break;
1678 default: ShouldNotReachHere();
1679 }
1680 } else if (right->is_constant()) {
1681 // cpu register - constant
1682 jlong c;
1683
1684 // FIXME. This is fugly: we really need to factor all this logic.
1685 switch(right->type()) {
1686 case T_LONG:
1687 c = right->as_constant_ptr()->as_jlong();
1688 break;
1689 case T_INT:
1690 case T_ADDRESS:
1691 c = right->as_constant_ptr()->as_jint();
1692 break;
1693 default:
1694 ShouldNotReachHere();
1695 c = 0; // unreachable
1696 break;
1697 }
1698
1699 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
1700 if (c == 0 && dreg == lreg) {
1701 COMMENT("effective nop elided");
1702 return;
1703 }
1704 switch(left->type()) {
1705 case T_INT:
1706 switch (code) {
1707 case lir_add: __ addw(dreg, lreg, c); break;
1708 case lir_sub: __ subw(dreg, lreg, c); break;
1709 default: ShouldNotReachHere();
1710 }
1711 break;
1712 case T_OBJECT:
1713 case T_ADDRESS:
1714 switch (code) {
1715 case lir_add: __ add(dreg, lreg, c); break;
1716 case lir_sub: __ sub(dreg, lreg, c); break;
1717 default: ShouldNotReachHere();
1718 }
1719 break;
1720 default:
1721 ShouldNotReachHere();
1722 }
1723 } else {
1724 ShouldNotReachHere();
1725 }
1726
1727 } else if (left->is_double_cpu()) {
1728 Register lreg_lo = left->as_register_lo();
1729
1730 if (right->is_double_cpu()) {
1731 // cpu register - cpu register
1732 Register rreg_lo = right->as_register_lo();
1733 switch (code) {
1734 case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1735 case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1736 case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
1737 case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
1738 case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
1739 default:
1740 ShouldNotReachHere();
1741 }
1742
1743 } else if (right->is_constant()) {
1744 jlong c = right->as_constant_ptr()->as_jlong();
1745 Register dreg = as_reg(dest);
1746 switch (code) {
1747 case lir_add:
1748 case lir_sub:
1749 if (c == 0 && dreg == lreg_lo) {
1750 COMMENT("effective nop elided");
1751 return;
1752 }
1753 code == lir_add ? __ add(dreg, lreg_lo, c) : __ sub(dreg, lreg_lo, c);
1754 break;
1755 case lir_div:
1756 assert(c > 0 && is_power_of_2_long(c), "divisor must be power-of-2 constant");
1757 if (c == 1) {
1758 // move lreg_lo to dreg if divisor is 1
1759 __ mov(dreg, lreg_lo);
1760 } else {
1761 unsigned int shift = exact_log2_long(c);
1762 // use rscratch1 as intermediate result register
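          // Signed division by 2^shift, rounded toward zero: bias a
          // negative dividend by (c - 1) before the arithmetic shift.
          // The sign mask (lreg_lo >> 63) is all ones for a negative
          // value, so shifting it logically right by (64 - shift)
          // leaves exactly (c - 1). Example, c == 8: -9 + 7 == -2 and
          // -2 >> 3 == -1 == trunc(-9 / 8).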
          __ asr(rscratch1, lreg_lo, 63);
          __ add(rscratch1, lreg_lo, rscratch1, Assembler::LSR, 64 - shift);
          __ asr(dreg, rscratch1, shift);
        }
        break;
      case lir_rem:
        assert(c > 0 && is_power_of_2_long(c), "divisor must be power-of-2 constant");
        if (c == 1) {
          // move 0 to dreg if divisor is 1
          __ mov(dreg, zr);
        } else {
          // use rscratch1 as intermediate result register
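          // Remainder takes the sign of the dividend: compute both
          // lreg_lo & (c - 1) and -((-lreg_lo) & (c - 1)), then let
          // csneg pick by the sign of -lreg_lo (set by negs). Example,
          // c == 8: -9 % 8 == -(9 & 7) == -1.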
          __ negs(rscratch1, lreg_lo);
          __ andr(dreg, lreg_lo, c - 1);
          __ andr(rscratch1, rscratch1, c - 1);
          __ csneg(dreg, dreg, rscratch1, Assembler::MI);
        }
        break;
      default:
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
    switch (code) {
    case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_mul_strictfp: // fall through
    case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_div_strictfp: // fall through
    case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    default:
      ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    if (right->is_double_fpu()) {
      // fpu register - fpu register
      switch (code) {
      case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_mul_strictfp: // fall through
      case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_div_strictfp: // fall through
      case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      if (right->is_constant()) {
        ShouldNotReachHere();
      }
      ShouldNotReachHere();
    }
  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    ShouldNotReachHere();
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch(code) {
  case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
  case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
  default      : ShouldNotReachHere();
  }
}

void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {

  assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
  Register Rleft = left->is_single_cpu() ? left->as_register() :
                                           left->as_register_lo();
  if (dst->is_single_cpu()) {
    Register Rdst = dst->as_register();
    if (right->is_constant()) {
      switch (code) {
      case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
      case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
      case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
      default: ShouldNotReachHere(); break;
      }
    } else {
      Register Rright = right->is_single_cpu() ? right->as_register() :
                                                 right->as_register_lo();
      switch (code) {
      case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
      case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
      case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
      default: ShouldNotReachHere(); break;
      }
    }
  } else {
    Register Rdst = dst->as_register_lo();
    if (right->is_constant()) {
      switch (code) {
      case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
      case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
      case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
      default: ShouldNotReachHere(); break;
      }
    } else {
      Register Rright = right->is_single_cpu() ? right->as_register() :
                                                 right->as_register_lo();
      switch (code) {
      case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
      case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
      case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
      default: ShouldNotReachHere(); break;
      }
    }
  }
}


void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) {

  // opcode check
  assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
  bool is_irem = (code == lir_irem);

  // operand check
  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");
  Register lreg = left->as_register();
  Register dreg = result->as_register();

  // power-of-2 constant check and codegen
  if (right->is_constant()) {
    int c = right->as_constant_ptr()->as_jint();
    assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
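    // Same power-of-2 divisor strategy as the 64-bit constant path in
    // arith_op above, using the 32-bit w-register forms here.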
    if (is_irem) {
      if (c == 1) {
        // move 0 to dreg if divisor is 1
        __ movw(dreg, zr);
      } else {
        // use rscratch1 as intermediate result register
        __ negsw(rscratch1, lreg);
        __ andw(dreg, lreg, c - 1);
        __ andw(rscratch1, rscratch1, c - 1);
        __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
      }
    } else {
      if (c == 1) {
        // move lreg to dreg if divisor is 1
        __ movw(dreg, lreg);
      } else {
        unsigned int shift = exact_log2(c);
        // use rscratch1 as intermediate result register
        __ asrw(rscratch1, lreg, 31);
        __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
        __ asrw(dreg, rscratch1, shift);
      }
    }
  } else {
    Register rreg = right->as_register();
    __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
  }
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_constant() && opr2->is_single_cpu()) {
    // tableswitch
    Register reg = as_reg(opr2);
    struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
    __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmpoop(reg1, reg2);
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register_lo();
      __ cmp(reg1, reg2);
      return;
    }

    if (opr2->is_constant()) {
      bool is_32bit = false; // width of register operand
      jlong imm;

      switch(opr2->type()) {
      case T_INT:
        imm = opr2->as_constant_ptr()->as_jint();
        is_32bit = true;
        break;
      case T_LONG:
        imm = opr2->as_constant_ptr()->as_jlong();
        break;
      case T_ADDRESS:
        imm = opr2->as_constant_ptr()->as_jint();
        break;
      case T_METADATA:
        imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
        break;
      case T_OBJECT:
      case T_ARRAY:
        jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
        __ cmpoop(reg1, rscratch1);
        return;
      default:
        ShouldNotReachHere();
        imm = 0;  // unreachable
        break;
      }

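      // A64 add/sub immediates are 12 bits, optionally shifted left by
      // 12; a comparison against anything else has to materialize the
      // constant in a scratch register first.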
      if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
        if (is_32bit)
          __ cmpw(reg1, imm);
        else
          __ subs(zr, reg1, imm);
        return;
      } else {
        __ mov(rscratch1, imm);
        if (is_32bit)
          __ cmpw(reg1, rscratch1);
        else
          __ cmp(reg1, rscratch1);
        return;
      }
    } else
      ShouldNotReachHere();
  } else if (opr1->is_single_fpu()) {
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    __ fcmps(reg1, reg2);
  } else if (opr1->is_double_fpu()) {
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ fcmpd(reg1, reg2);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    Label done;
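    // Three-way long compare in three instructions: preload -1, branch
    // out on LT, otherwise csinc leaves 0 for EQ and produces 1 for GT.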
    __ cmp(left->as_register_lo(), right->as_register_lo());
    __ mov(dst->as_register(), (u_int64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code code) { }


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == r0, "must match");
  assert(exceptionPC->as_register() == r3, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ adr(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(r0);
  // search an exception handler (r0: exception oop, r3: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // FIXME: enough room for two byte trap ????
  __ nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == r0, "must match");

  __ b(_unwind_handler_entry);
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();

  switch (left->type()) {
  case T_INT:
    switch (code) {
    case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
    case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
    case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  case T_LONG:
  case T_ADDRESS:
  case T_OBJECT:
    switch (code) {
    case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
    case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
    case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();

  switch (left->type()) {
  case T_INT:
    switch (code) {
    case lir_shl:  __ lslw (dreg, lreg, count); break;
    case lir_shr:  __ asrw (dreg, lreg, count); break;
    case lir_ushr: __ lsrw (dreg, lreg, count); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  case T_LONG:
  case T_ADDRESS:
  case T_OBJECT:
    switch (code) {
    case lir_shl:  __ lsl (dreg, lreg, count); break;
    case lir_shr:  __ asr (dreg, lreg, count); break;
    case lir_ushr: __ lsr (dreg, lreg, count); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}


void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ str (r, Address(sp, offset_from_rsp_in_bytes));
}


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}


// This code replaces a call to arraycopy; no exceptions may be thrown
// in this code; they must be thrown in the System.arraycopy activation
// frame. We could save some checks if this were not the case.
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  __ resolve(ACCESS_READ, src);
  __ resolve(ACCESS_WRITE, dst);

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL // || basic_type == T_OBJECT
      ) {
    Label done;
    assert(src == r1 && src_pos == r2, "mismatch in calling convention");

    // Save the arguments in case the generic arraycopy fails and we
    // have to fall back to the JNI stub
    __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ str(src, Address(sp, 4*BytesPerWord));

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != NULL, "generic arraycopy stub required");

    // The arguments are in java calling convention so we shift them
    // to C convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
    __ mov(c_rarg4, j_rarg4);
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
    }
#endif
    __ far_call(RuntimeAddress(copyfunc_addr));

    __ cbz(r0, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
    __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
    __ ldr(src, Address(sp, 4*BytesPerWord));

    // r0 is -1^K where K == partial copied count
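    // eonw(rscratch1, r0, zr) computes ~r0, i.e. K itself, so the
    // adjustments below skip over the elements already copied.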
    __ eonw(rscratch1, r0, zr);
    // adjust length down and src/end pos up by partial copied count
    __ subw(length, length, rscratch1);
    __ addw(src_pos, src_pos, rscratch1);
    __ addw(dst_pos, dst_pos, rscratch1);
    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;
  int scale = exact_log2(elem_size);

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ cbz(src, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ cmpw(rscratch1, Klass::_lh_neutral_value);
      __ br(Assembler::GE, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ cmpw(rscratch1, Klass::_lh_neutral_value);
      __ br(Assembler::GE, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ cmpw(src_pos, 0);
    __ br(Assembler::LT, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ cmpw(dst_pos, 0);
    __ br(Assembler::LT, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ cmpw(length, 0);
    __ br(Assembler::LT, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ addw(tmp, src_pos, length);
    __ ldrw(rscratch1, src_length_addr);
    __ cmpw(tmp, rscratch1);
    __ br(Assembler::HI, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ addw(tmp, dst_pos, length);
    __ ldrw(rscratch1, dst_length_addr);
    __ cmpw(tmp, rscratch1);
    __ br(Assembler::HI, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ ldrw(tmp, src_klass_addr);
        __ ldrw(rscratch1, dst_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(tmp, src_klass_addr);
        __ ldr(rscratch1, dst_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::NE, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

#define PUSH(r1, r2)                                    \
      stp(r1, r2, __ pre(sp, -2 * wordSize));

#define POP(r1, r2)                                     \
      ldp(r1, r2, __ post(sp, 2 * wordSize));

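      // The first PUSH spills the src/dst oops around the klass loads
      // and the inlined subtype check; the second spills the two klass
      // arguments around the slow-path stub, which reports its result
      // through the spilled sub-klass slot, so a non-zero src after the
      // POP means the subtype check succeeded.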
      __ PUSH(src, dst);

      __ load_klass(src, src);
      __ load_klass(dst, dst);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      __ PUSH(src, dst);
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ POP(src, dst);

      __ cbnz(src, cont);

      __ bind(slow);
      __ POP(src, dst);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // At least one of the two is statically known to be an object
          // array; check the other one's layout helper at runtime.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ ldrw(rscratch1, klass_lh_addr);
          __ mov(rscratch2, objArray_lh);
          __ eorw(rscratch1, rscratch1, rscratch2);
          __ cbnzw(rscratch1, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
        __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
        __ str(src, Address(sp, 4*BytesPerWord));

        __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
        __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
        __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg1, dst, length);
        __ uxtw(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

        __ load_klass(c_rarg4, dst);
        __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ far_call(RuntimeAddress(copyfunc_addr));

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz(r0, failed);
          __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        __ cbz(r0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif
        assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);

        // Restore previously spilled arguments
        __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
        __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
        __ ldr(src, Address(sp, 4*BytesPerWord));

        // return value is -1^K where K is partial copied count
        __ eonw(rscratch1, r0, zr);
        // adjust length down and src/end pos up by partial copied count
        __ subw(length, length, rscratch1);
        __ addw(src_pos, src_pos, rscratch1);
        __ addw(dst_pos, dst_pos, rscratch1);
      }

      __ b(*stub->entry());

      __ bind(cont);
      __ POP(src, dst);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }

    if (basic_type != T_OBJECT) {

      if (UseCompressedClassPointers) {
        __ ldrw(rscratch1, dst_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(rscratch1, dst_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::NE, halt);
      if (UseCompressedClassPointers) {
        __ ldrw(rscratch1, src_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(rscratch1, src_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::EQ, known_ok);
    } else {
      if (UseCompressedClassPointers) {
        __ ldrw(rscratch1, dst_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(rscratch1, dst_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::EQ, known_ok);
      __ cmp(src, dst);
      __ br(Assembler::EQ, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
  }
#endif

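  // Select the specialized copy stub for this element size, taking into
  // account whether the ranges may overlap and whether the copy is known
  // to be aligned; name is an out parameter (useful when debugging).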
  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb) {
    __ far_call(RuntimeAddress(entry));
  } else {
    __ call_VM_leaf(entry, 3);
  }

  __ bind(*stub->continuation());
}


void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(rscratch1, known_klass->constant_encoding());
          __ lea(rscratch2, recv_addr);
          __ str(rscratch1, Address(rscratch2));
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}


void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
  unsigned long offset;
  __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
  if (offset) __ add(res, res, offset);

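  // The Java CRC32 value is kept bit-inverted between updates, so
  // un-invert on entry, run the table-driven byte update, and
  // re-invert on exit.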
  __ mvnw(crc, crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ mvnw(res, crc); // ~crc
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert(mdo_addr.base() != rscratch1, "wrong register");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ cbnz(tmp, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
      __ str(rscratch2, mdo_addr);
    }
    if (do_update) {
#ifndef ASSERT
      __ b(next);
    }
#else
      __ b(next);
    }
  } else {
    __ cbnz(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp);
      __ mov_metadata(rscratch1, exact_klass->constant_encoding());
      __ eor(rscratch1, tmp, rscratch1);
      __ cbz(rscratch1, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ cbz(rscratch1, next);

        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cbz(rscratch2, none);
          __ cmp(rscratch2, (u1)TypeEntries::null_seen);
          __ br(Assembler::EQ, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ dmb(Assembler::ISHLD);
          __ ldr(rscratch2, mdo_addr);
          __ eor(tmp, tmp, rscratch2);
          __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
          __ cbz(rscratch1, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ldr(tmp, mdo_addr);
        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
      }

      // Different from the klass recorded so far: we can no longer keep
      // an accurate profile, so set the unknown bit.
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
      __ str(rscratch2, mdo_addr);

      if (TypeEntries::is_type_none(current_klass)) {
        __ b(next);

        __ bind(none);
        // first time here. Set profile type.
        __ str(tmp, mdo_addr);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        __ cbz(rscratch1, next);
#ifdef ASSERT
        {
          Label ok;
          __ ldr(rscratch1, mdo_addr);
          __ cbz(rscratch1, ok);
          __ cmp(rscratch1, (u1)TypeEntries::null_seen);
          __ br(Assembler::EQ, ok);
          // may have been set by another thread
          __ dmb(Assembler::ISHLD);
          __ mov_metadata(rscratch1, exact_klass->constant_encoding());
          __ ldr(rscratch2, mdo_addr);
          __ eor(rscratch2, rscratch1, rscratch2);
          __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
          __ cbz(rscratch2, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
#endif
        // first time here. Set profile type.
        __ str(tmp, mdo_addr);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ ldr(tmp, mdo_addr);
        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.

        __ orr(tmp, tmp, TypeEntries::type_unknown);
        __ str(tmp, mdo_addr);
        // FIXME: Write barrier needed here?
      }
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}


void LIR_Assembler::align_backward_branch_target() {
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fnegs(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    assert(dest->is_double_fpu(), "expect double float result reg");
    __ fnegd(dest->as_double_reg(), left->as_double_reg());
  }
}


void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

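  // far_call is only guaranteed to reach targets within the code cache;
  // destinations outside it (e.g. C runtime entry points) go through an
  // absolute branch via rscratch1.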
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    __ mov(rscratch1, RuntimeAddress(dest));
    __ blr(rscratch1);
  }

  if (info != NULL) {
    add_call_info_here(info);
  }
  __ maybe_isb();
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (dest->is_address() || src->is_address()) {
    move_op(src, dest, type, lir_patch_none, info,
            /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::AL;
    switch (op->condition()) {
    case lir_cond_equal:        acond = Assembler::EQ; break;
    case lir_cond_notEqual:     acond = Assembler::NE; break;
    case lir_cond_less:         acond = Assembler::LT; break;
    case lir_cond_lessEqual:    acond = Assembler::LE; break;
    case lir_cond_greaterEqual: acond = Assembler::GE; break;
    case lir_cond_greater:      acond = Assembler::GT; break;
    case lir_cond_belowEqual:   acond = Assembler::LS; break;
    case lir_cond_aboveEqual:   acond = Assembler::HS; break;
    default:                    ShouldNotReachHere();
    }
    __ br(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif


void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

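// membar_acquire orders the preceding loads against all subsequent
// memory accesses; membar_release orders all earlier accesses against
// the following stores.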
void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}

void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}


void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions.  We will turn them into a tableswitch.  You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          // for (int i = 0; i < inst->length(); i++) {
          //   inst->at(i)->print();
          //   tty->print("\n");
          // }
          // tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn, sw->_first_key = first_key,
          sw->_last_key = last_key, sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            offset -= 2, i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = -2147483648;
    }
  next_state:
    ;
  }
#endif
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = type == T_OBJECT || type == T_ARRAY;

  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
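
  // Select the acquire/release atomic helper matching the operand
  // width: the word (w) forms for T_INT and compressed oops, the
  // doubleword forms for T_LONG and uncompressed oops.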
  switch(type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    if (UseCompressedOops) {
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;
      Register tmp = as_reg(tmp_op);
      Register dst = as_reg(dest);
      if (data->is_constant()) {
        inc = RegisterOrConstant(as_long(data));
        assert_different_registers(dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      } else {
        inc = RegisterOrConstant(as_reg(data));
        assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      }
      __ lea(tmp, addr);
      (_masm->*add)(dst, inc, tmp);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
      if (is_oop && UseCompressedOops) {
        __ encode_heap_oop(rscratch2, obj);
        obj = rscratch2;
      }
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      __ lea(tmp, addr);
      (_masm->*xchg)(dst, obj, tmp);
      if (is_oop && UseCompressedOops) {
        __ decode_heap_oop(dst);
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
  __ membar(__ AnyAny);
}

#undef __
