/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_arm.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"

#define __ _masm->

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and is used with care in the other C1-specific
// files.

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  // The first register in Java calling conventions
  return FrameMap::R0_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(R0);
}

#ifndef PRODUCT
void LIR_Assembler::verify_reserved_argument_area_size(int args_count) {
  assert(args_count * wordSize <= frame_map()->reserved_argument_area_size(), "not enough space for arguments");
}
#endif // !PRODUCT

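// Both store_parameter overloads below materialize the value into Rtemp and
// spill it into the reserved argument area at the top of the frame (addressed
// off SP), which is how outgoing runtime-stub arguments are passed here.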
void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_slow(Rtemp, c);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

void LIR_Assembler::store_parameter(Metadata* m, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
  __ mov_metadata(Rtemp, m);
  __ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
}

//--------------fpu register translations-----------------------


void LIR_Assembler::set_24bit_FPU() {
  ShouldNotReachHere();
}

void LIR_Assembler::reset_FPU() {
  ShouldNotReachHere();
}

void LIR_Assembler::fpop() {
  Unimplemented();
}

void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}

void LIR_Assembler::breakpoint() {
  __ breakpoint();
}

void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}

//-------------------------------------------
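// ARM LDR/STR instructions encode only a limited immediate offset (the checks
// below allow -4095..4095), so a LIR address whose constant displacement falls
// outside that range cannot be expressed as a single Address and forces a
// bailout. A register index is folded into the shifted-operand form instead;
// as a sketch: Address(base, index, lsl, scale) encodes base + (index << scale).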
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal() || addr->index()->is_constant()) {
    int offset = addr->disp();
    if (addr->index()->is_constant()) {
      offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
    }

    if ((offset <= -4096) || (offset >= 4096)) {
      BAILOUT_("offset not in range", Address(base));
    }

    return Address(base, offset);

  } else {
    assert(addr->disp() == 0, "can't have both");
    int scale = addr->scale();

    assert(addr->index()->is_single_cpu(), "should be");
    return scale >= 0 ? Address(base, addr->index()->as_register(), lsl, scale) :
                        Address(base, addr->index()->as_register(), lsr, -scale);
  }
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  assert(base.index() == noreg, "must be");
  if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(), 0)); }
  return Address(base.base(), base.disp() + BytesPerWord);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


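// The OSR buffer is assumed to be laid out as the interpreter packs it:
// locals first, then one two-word BasicObjectLock (displaced header word,
// object word) per monitor. The loop below copies each lock/object pair into
// this frame's monitor area; pair i sits at monitor_offset - i*2*BytesPerWord
// in the buffer.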
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
  int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
    __ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
    __ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
    __ str(R1, frame_map()->address_for_monitor_lock(i));
    __ str(R2, frame_map()->address_for_monitor_object(i));
  }
}


int LIR_Assembler::check_icache() {
  Register receiver = LIR_Assembler::receiverOpr()->as_register();
  int offset = __ offset();
  __ inline_cache_check(receiver, Ricklass);
  return offset;
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = (jobject)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_oop_index(o);

  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);

  __ patchable_mov_oop(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = (Metadata*)Universe::non_oop_word();
  int index = __ oop_recorder()->allocate_metadata_index(o);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  __ patchable_mov_metadata(reg, o, index);
  patching_epilog(patch, lir_patch_normal, reg, info);
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  // Subtracts two words to account for return address and link
  return frame_map()->framesize()*VMRegImpl::stack_slot_size - 2*wordSize;
}


int LIR_Assembler::emit_exception_handler() {
  // TODO: ARM
  __ nop(); // See comments in other ports

  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // check that there is really an exception
  __ verify_not_null_oop(Rexception_obj);

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ should_not_reach_here();

  assert(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
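// The handler reloads the pending exception oop from the thread and clears
// the thread's exception state, performs any unlocking a synchronized method
// still owes (through a MonitorExitStub), pops the frame (restoring FP and
// LR), and finally jumps to the unwind_exception runtime stub, which
// dispatches to the caller's exception handler.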
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register zero = __ zero_register(Rtemp);
  __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception_obj);

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R0_opr);
    stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
    __ unlock_object(R2, R1, R0, Rtemp, *stub->entry());
    __ bind(*stub->continuation());
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
  __ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


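// Sketch of the contract assumed below: the deopt blob's unpack entry expects
// the address of the deopt point on the stack, so the handler materializes
// its own PC into LR, pushes it, and jumps to SharedRuntime::deopt_blob()->unpack(),
// which uses the saved PC to locate the deoptimization data for this site.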
int LIR_Assembler::emit_deopt_handler() {
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ mov_relative_address(LR, __ pc());
  __ push(LR); // stub expects LR to be saved
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);

  assert(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


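// Safepoint polling is a load from the VM's polling page: when a safepoint is
// requested the page is protected, the ldr faults, and the signal handler
// stops the thread. The relocation records the poll site so the faulting
// instruction can be recognized as a poll.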
void LIR_Assembler::return_op(LIR_Opr result) {
  // Pop the frame before safepoint polling
  __ remove_frame(initial_frame_size_in_bytes());

  // mov_slow here is usually one or two instructions
  __ mov_address(Rtemp, os::get_polling_page(), symbolic_Relocation::polling_page_reference);
  __ relocate(relocInfo::poll_return_type);
  __ ldr(Rtemp, Address(Rtemp));
  __ ret();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  __ mov_address(Rtemp, os::get_polling_page(), symbolic_Relocation::polling_page_reference);
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ ldr(Rtemp, Address(Rtemp));
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) {
    __ mov(to_reg, from_reg);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant() && dest->is_register(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_ADDRESS:
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
      __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
      break;

    case T_OBJECT:
      if (patch_code == lir_patch_none) {
        __ mov_oop(dest->as_register(), c->as_jobject());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_METADATA:
      if (patch_code == lir_patch_none) {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      } else {
        klass2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_FLOAT:
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), c->as_jfloat());
      } else {
        // Simple getters can return float constant directly into r0
        __ mov_slow(dest->as_register(), c->as_jint_bits());
      }
      break;

    case T_DOUBLE:
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), c->as_jdouble());
      } else {
        // Simple getters can return double constant directly into r1r0
        __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
      }
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "must be");
  assert(dest->is_stack(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: // fall through
    case T_FLOAT:
      __ mov_slow(Rtemp, c->as_jint_bits());
      __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_ADDRESS:
      __ mov_slow(Rtemp, c->as_jint());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      __ mov_oop(Rtemp, c->as_jobject());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_LONG: // fall through
    case T_DOUBLE:
      __ mov_slow(Rtemp, c->as_jint_lo_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
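      // If both halves of the constant are the same (e.g. 0 or -1), Rtemp
      // already holds the high word's bits and the second mov_slow is skipped.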
      if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
        __ mov_slow(Rtemp, c->as_jint_hi_bits());
      }
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              CodeEmitInfo* info, bool wide) {
  assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == NULL), "cannot handle otherwise");
  __ mov(Rtemp, 0);

  int null_check_offset = code_offset();
  __ str(Rtemp, as_Address(dest->as_address_ptr()));

  if (info != NULL) {
    assert(false, "arm32 didn't support this before, investigate if bug");
    add_debug_info_for_null_check(null_check_offset, info);
  }
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register() && dest->is_register(), "must be");

  if (src->is_single_cpu()) {
    if (dest->is_single_cpu()) {
      move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_single_fpu()) {
      __ fmsr(dest->as_float_reg(), src->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    if (dest->is_double_cpu()) {
      __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
    } else {
      __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
    }
  } else if (src->is_single_fpu()) {
    if (dest->is_single_fpu()) {
      __ mov_float(dest->as_float_reg(), src->as_float_reg());
    } else if (dest->is_single_cpu()) {
      __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_fpu()) {
    if (dest->is_double_fpu()) {
      __ mov_double(dest->as_double_reg(), src->as_double_reg());
    } else if (dest->is_double_cpu()) {
      __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  Address addr = dest->is_single_word() ?
    frame_map()->address_for_slot(dest->single_stack_ix()) :
    frame_map()->address_for_slot(dest->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (src->is_single_fpu() || src->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (src->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:    __ verify_oop(src->as_register());   // fall through
      case T_ADDRESS:
      case T_METADATA: __ str(src->as_register(), addr);    break;
      case T_FLOAT:    // used in intBitsToFloat intrinsic implementation, fall through
      case T_INT:      __ str_32(src->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    __ str(src->as_register_lo(), addr);
    __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (src->is_single_fpu()) {
    __ str_float(src->as_float_reg(), addr);
  } else if (src->is_double_fpu()) {
    __ str_double(src->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool pop_fpu_stack, bool wide,
                            bool unaligned) {
  LIR_Address* to_addr = dest->as_address_ptr();
  Register base_reg = to_addr->base()->as_pointer_register();
  const bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  int null_check_offset = code_offset();

  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        ShouldNotReachHere();
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
      __ strb(src->as_register(), as_Address(to_addr));
      break;

    case T_CHAR:
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        assert(to_addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        base_reg = Rtemp;
        __ str(from_lo, Address(Rtemp));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == from_lo) {
        __ str(from_hi, as_Address_hi(to_addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ str(from_lo, as_Address_lo(to_addr));
      } else {
        __ str(from_lo, as_Address_lo(to_addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, as_Address_hi(to_addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fsts(src->as_float_reg(), as_Address(to_addr));
      }
      break;

    case T_DOUBLE:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_offset, info);
  }

  if (patch != NULL) {
    // The offset embedded in the LDR/STR instruction may not be enough to
    // address the field. So, provide space for one more instruction that
    // can deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  Address addr = src->is_single_word() ?
    frame_map()->address_for_slot(src->single_stack_ix()) :
    frame_map()->address_for_slot(src->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (dest->is_single_fpu() || dest->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (dest->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA: __ ldr(dest->as_register(), addr); break;
      case T_FLOAT:    // used in floatToRawIntBits intrinsic implementation
      case T_INT:      __ ldr_u32(dest->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
    if ((type == T_OBJECT) || (type == T_ARRAY)) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    __ ldr(dest->as_register_lo(), addr);
    __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (dest->is_single_fpu()) {
    __ ldr_float(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    __ ldr_double(dest->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    switch (src->type()) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        __ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      case T_INT:
      case T_FLOAT:
        __ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      default:
        ShouldNotReachHere();
    }
  } else {
    assert(src->is_double_stack(), "must be");
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide, bool unaligned) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  Register base_reg = addr->base()->as_pointer_register();

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ ldr_u32(dest->as_pointer_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_pointer_register(), as_Address(addr));
      }
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;

    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        assert(addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        base_reg = Rtemp;
        __ ldr(to_lo, Address(Rtemp));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == to_lo) {
        __ ldr(to_hi, as_Address_hi(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ ldr(to_lo, as_Address_lo(addr));
      } else {
        __ ldr(to_lo, as_Address_lo(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, as_Address_hi(addr));
      }
      break;
    }

#ifndef __SOFTFP__
    case T_FLOAT:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ flds(dest->as_float_reg(), as_Address(addr));
      }
      break;

    case T_DOUBLE:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    // The offset embedded in the LDR/STR instruction may not be enough to
    // address the field. So, provide space for one more instruction that
    // can deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }

}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  bool is_32 = op->result_opr()->is_single_cpu();

  if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
    int c = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");

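    // Signed division by 2^power cannot be a plain arithmetic shift: asr
    // rounds toward negative infinity while idiv must round toward zero. So
    // for negative dividends (2^power - 1) is added first, computed below as
    // left + ((left >> 31) unsigned-shifted right by (32 - power)), and only
    // then is the arithmetic shift applied.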
    Register left = op->in_opr1()->as_register();
    Register dest = op->result_opr()->as_register();
    if (c == 1) {
      __ mov(dest, left);
    } else if (c == 2) {
      __ add_32(dest, left, AsmOperand(left, lsr, 31));
      __ asr_32(dest, dest, 1);
    } else if (c != (int) 0x80000000) {
      int power = log2_intptr(c);
      __ asr_32(Rtemp, left, 31);
      __ add_32(dest, left, AsmOperand(Rtemp, lsr, 32 - power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
      __ asr_32(dest, dest, power);                              // dest = dest >> power;
    } else {
      // x/0x80000000 is a special case, since dividend is a power of two, but is negative.
      // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
      __ cmp_32(left, c);
      __ mov(dest, 0, ne);
      __ mov(dest, 1, eq);
    }
  } else {
    assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
    add_debug_info_for_div0_here(op->info());
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "CodeEmitInfo?");
#endif // ASSERT

#ifdef __SOFTFP__
  assert(op->code() != lir_cond_float_branch, "this should be impossible");
#else
  if (op->code() == lir_cond_float_branch) {
    __ fmstat();
    __ b(*(op->ublock()->label()), vs);
  }
#endif // __SOFTFP__

  AsmCondition acond = al;
  switch (op->cond()) {
    case lir_cond_equal:        acond = eq; break;
    case lir_cond_notEqual:     acond = ne; break;
    case lir_cond_less:         acond = lt; break;
    case lir_cond_lessEqual:    acond = le; break;
    case lir_cond_greaterEqual: acond = ge; break;
    case lir_cond_greater:      acond = gt; break;
    case lir_cond_aboveEqual:   acond = hs; break;
    case lir_cond_belowEqual:   acond = ls; break;
    default: assert(op->cond() == lir_cond_always, "must be");
  }
  __ b(*(op->label()), acond);
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      move_regs(src->as_register(), dest->as_register_lo());
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
      __ sign_extend(dest->as_register(), src->as_register(), 8);
      break;
    case Bytecodes::_i2s:
      __ sign_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_i2c:
      __ zero_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_f2d:
      __ convert_f2d(dest->as_double_reg(), src->as_float_reg());
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
      break;
    case Bytecodes::_i2d:
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
      break;
    case Bytecodes::_f2i:
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    case Bytecodes::_d2i:
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    Register tmp = op->tmp1()->as_register();
    __ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(tmp, InstanceKlass::fully_initialized);
    __ b(*op->stub()->entry(), ne);
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

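// Receiver-type profiling: the MDO keeps VirtualCallData::row_limit()
// (receiver klass, count) rows per call site. The first loop below increments
// the count of the row whose recorded receiver matches `recv`; if none
// matches, the second loop claims the first empty row for this receiver,
// seeding its count with DataLayout::counter_increment.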
void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  assert_different_registers(mdo, recv, tmp1);
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ldr(tmp1, receiver_addr);
    __ verify_klass_ptr(tmp1);
    __ cmp(recv, tmp1);
    __ b(next_test, ne);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, data_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, data_addr);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, recv_addr);
    __ cbnz(tmp1, next_test);
    __ str(recv, recv_addr);
    __ mov(tmp1, DataLayout::counter_increment);
    __ str(tmp1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                         mdo_offset_bias));
    __ b(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}

// On 32-bit ARM, code before this helper should test obj for null (the Z flag
// should be set if obj is null).
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
                                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                              Register obj, Register mdo, Register data_val, Label* obj_is_null) {
  assert(method != NULL, "Should have method");
  assert_different_registers(obj, mdo, data_val);
  setup_md_access(method, bci, md, data, mdo_offset_bias);
  Label not_null;
  __ b(not_null, ne);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(data_val, mdo_offset_bias);
    __ add(mdo, mdo, data_val);
  }
  Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  __ ldrb(data_val, flags_addr);
  __ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
  __ strb(data_val, flags_addr);
  __ b(*obj_is_null);
  __ bind(not_null);
}

void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                              Register mdo, Register recv, Register value, Register tmp1,
                                              Label* profile_cast_success, Label* profile_cast_failure,
                                              Label* success, Label* failure) {
  assert_different_registers(mdo, value, tmp1);
  __ bind(*profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  __ load_klass(recv, value);
  type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
  __ b(*success);
  // Cast failure case
  __ bind(*profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ ldr(tmp1, data_addr);
  __ sub(tmp1, tmp1, DataLayout::counter_increment);
  __ str(tmp1, data_addr);
  __ b(*failure);
}

// Sets `res` to true if `cond` holds.
static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
  __ mov(res, 1, cond);
}


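// The type checks below follow HotSpot's usual fast-path subtype protocol:
// compare the two klass pointers directly, then load the word at
// [sub_klass + super_klass->super_check_offset()] and compare it with the
// super klass (a hit in the primary supers array or the secondary cache).
// If super_check_offset is the secondary_super_cache offset itself, a miss is
// inconclusive and control falls into the slow_subtype_check_id stub, which
// returns its answer in R0 (zero meaning failure).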
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  // TODO: ARM - can be more effective with one more register
  switch (op->code()) {
    case lir_store_check: {
      CodeStub* stub = op->stub();
      Register value = op->object()->as_register();
      Register array = op->array()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
      if (op->should_profile()) {
        assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
      }

      // check if it needs to be profiled
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;
      Label profile_cast_success, profile_cast_failure, done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

      if (op->should_profile()) {
        __ cmp(value, 0);
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
      } else {
        __ cbz(value, done);
      }
      assert_different_registers(k_RInfo, value);
      add_debug_info_for_null_check_here(op->info_for_exception());
      __ load_klass(k_RInfo, array);
      __ load_klass(klass_RInfo, value);
      __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        if (mdo == value) {
          mdo = k_RInfo;
          recv = klass_RInfo;
        }
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, value, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_checkcast: {
      CodeStub* stub = op->stub();
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);

      if (stub->is_simple_exception_stub()) {
        // TODO: ARM - Late binding is used to prevent confusion of register allocator
        assert(stub->is_exception_throw_stub(), "must be");
        ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
      }
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;

      __ movs(res, obj);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else if (k_RInfo != obj) {
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
        __ movs(res, obj);
      } else {
        // Patching doesn't update "res" register after GC, so do patching first
        klass2reg_with_patching(Rtemp, op->info_for_patch());
        __ movs(res, obj);
        __ mov(k_RInfo, Rtemp);
      }
      __ load_klass(klass_RInfo, res, ne);

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo, ne);
        __ b(*failure_target, ne);
      } else if (k->is_loaded()) {
        __ b(*success_target, eq);
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          __ b(*failure_target, ne);
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cmp(Rtemp, k_RInfo, ne);
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ b(*success_target, eq);
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }

      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_instanceof: {
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);

      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;

      __ movs(res, obj);

      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }

      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

      if (!op->should_profile()) {
        __ mov(res, 0);
      }

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          set_instanceof_result(_masm, res, eq);
        } else {
          __ b(profile_cast_failure, ne);
        }
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          } else {
            __ b(profile_cast_failure, ne);
          }
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          }
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          if (!op->should_profile()) {
            move_regs(R0, res);
          } else {
            __ cbz(R0, *failure_target);
          }
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          __ ldr(res, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(res, k_RInfo, ne);
          set_instanceof_result(_masm, res, eq);
        } else {
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(Rtemp, k_RInfo, ne);
        }
        __ b(*success_target, eq);
        // check for immediate negative hit
        if (op->should_profile()) {
          __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        }
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        if (!op->should_profile()) {
          __ mov(res, 0, ne);
        }
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        if (!op->should_profile()) {
          move_regs(R0, res);
        }
        if (op->should_profile()) {
          __ cbz(R0, *failure_target);
        }
      }

      if (op->should_profile()) {
        Label done_ok, done_failure;
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done_ok, &done_failure);
        __ bind(done_failure);
        __ mov(res, 0);
        __ b(done);
        __ bind(done_ok);
        __ mov(res, 1);
      }
      __ bind(done);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  // if (*addr == cmpval) {
  //   *addr = newval;
  //   dest = 1;
  // } else {
  //   dest = 0;
  // }
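  // atomic_cas_bool leaves the condition flags set (eq on success), which the
  // conditional moves into `dest` below rely on; atomic_cas64 instead takes
  // `dest` as an explicit result register for the 64-bit case.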
  // FIXME: membar_release
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  Register addr = op->addr()->is_register() ?
    op->addr()->as_pointer_register() :
    op->addr()->as_address_ptr()->base()->as_pointer_register();
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_OprDesc::illegalOpr(), "unexpected index");
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmpval = op->cmp_value()->as_register();
    Register newval = op->new_value()->as_register();
    Register dest = op->result_opr()->as_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    __ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
    __ mov(dest, 1, eq);
    __ mov(dest, 0, ne);
  } else if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register dest = op->result_opr()->as_register();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();

    assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
    assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
    assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
    assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
    assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
    __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
                    new_value_lo, new_value_hi, addr, 0);
  } else {
    Unimplemented();
  }
  // FIXME: is full membar really needed instead of just membar_acquire?
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  AsmCondition acond = al;
  AsmCondition ncond = nv;
  if (opr1 != opr2) {
    switch (condition) {
      case lir_cond_equal:        acond = eq; ncond = ne; break;
      case lir_cond_notEqual:     acond = ne; ncond = eq; break;
      case lir_cond_less:         acond = lt; ncond = ge; break;
      case lir_cond_lessEqual:    acond = le; ncond = gt; break;
      case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
      case lir_cond_greater:      acond = gt; ncond = le; break;
      case lir_cond_aboveEqual:   acond = hs; ncond = lo; break;
      case lir_cond_belowEqual:   acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
    }
  }

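  // Emit the move of opr1 into result under acond, then run the same code a
  // second time with opr2 under the negated condition ncond; when opr1 == opr2
  // a single pass with an unconditional move suffices.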
1456 for (;;) { // two iterations only
1457 if (opr1 == result) {
1458 // do nothing
1459 } else if (opr1->is_single_cpu()) {
1460 __ mov(result->as_register(), opr1->as_register(), acond);
1461 } else if (opr1->is_double_cpu()) {
1462 __ long_move(result->as_register_lo(), result->as_register_hi(),
1463 opr1->as_register_lo(), opr1->as_register_hi(), acond);
1464 } else if (opr1->is_single_stack()) {
1465 __ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
1466 } else if (opr1->is_double_stack()) {
1467 __ ldr(result->as_register_lo(),
1468 frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
1469 __ ldr(result->as_register_hi(),
1470 frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
1471 } else if (opr1->is_illegal()) {
1472 // do nothing: this part of the cmove has been optimized away in the peephole optimizer
1473 } else {
1474 assert(opr1->is_constant(), "must be");
1475 LIR_Const* c = opr1->as_constant_ptr();
1476
1477 switch (c->type()) {
1478 case T_INT:
1479 __ mov_slow(result->as_register(), c->as_jint(), acond);
1480 break;
1481 case T_LONG:
1482 __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
1483 __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
1484 break;
1485 case T_OBJECT:
1486 __ mov_oop(result->as_register(), c->as_jobject(), 0, acond);
1487 break;
1488 case T_FLOAT:
1489 #ifdef __SOFTFP__
1490 // not generated now.
1491 __ mov_slow(result->as_register(), c->as_jint(), acond);
1492 #else
1493 __ mov_float(result->as_float_reg(), c->as_jfloat(), acond);
1494 #endif // __SOFTFP__
1495 break;
1496 case T_DOUBLE:
1497 #ifdef __SOFTFP__
1498 // not generated now.
1499 __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
1500 __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
1501 #else
1502 __ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
1503 #endif // __SOFTFP__
1504 break;
1505 default:
1506 ShouldNotReachHere();
1507 }
1508 }
1509
1510 // Negate the condition and repeat the algorithm with the second operand
1511 if (opr1 == opr2) { break; }
1512 opr1 = opr2;
1513 acond = ncond;
1514 }
1515 }

#ifdef ASSERT
static int reg_size(LIR_Opr op) {
  switch (op->type()) {
    case T_FLOAT:
    case T_INT:      return BytesPerInt;
    case T_LONG:
    case T_DOUBLE:   return BytesPerLong;
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA: return BytesPerWord;
    case T_ADDRESS:
    case T_ILLEGAL:  // fall through
    default: ShouldNotReachHere(); return -1;
  }
}
#endif

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(dest->is_register(), "wrong items state");

  if (right->is_address()) {
    // special case for adding shifted/extended register
    const Register res = dest->as_pointer_register();
    const Register lreg = left->as_pointer_register();
    const LIR_Address* addr = right->as_address_ptr();

    assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

    int scale = addr->scale();
    AsmShift shift = lsl;

    assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
    assert(reg_size(addr->base()) == reg_size(dest), "should be");
    assert(reg_size(dest) == wordSize, "should be");

    AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
    switch (code) {
      case lir_add: __ add(res, lreg, operand); break;
      case lir_sub: __ sub(res, lreg, operand); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_address()) {
    assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
    const LIR_Address* addr = left->as_address_ptr();
    const Register res = dest->as_register();
    const Register rreg = right->as_register();
    assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
    __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));

  } else if (dest->is_single_cpu()) {
    assert(left->is_single_cpu(), "unexpected left operand");

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add_32(res, lreg, rreg); break;
        case lir_sub: __ sub_32(res, lreg, rreg); break;
        case lir_mul: __ mul_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const jint c = right->as_constant_ptr()->as_jint();
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_add: __ add_32(res, lreg, c); break;
        case lir_sub: __ sub_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (dest->is_double_cpu()) {
    Register res_lo = dest->as_register_lo();
    Register res_hi = dest->as_register_hi();
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();
    if (right->is_double_cpu()) {
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, rreg_lo);
          __ adc(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, rreg_lo);
          __ sbc(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range");
      const jint c = (jint) right->as_constant_ptr()->as_jlong();
      if (res_lo == lreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, c);
          __ adc(res_hi, lreg_hi, 0);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, c);
          __ sbc(res_hi, lreg_hi, 0);
          break;
        default:
          ShouldNotReachHere();
      }
    }
    move_regs(res_lo, dest->as_register_lo());

  } else if (dest->is_single_fpu()) {
    assert(left->is_single_fpu(), "must be");
    assert(right->is_single_fpu(), "must be");
    const FloatRegister res = dest->as_float_reg();
    const FloatRegister lreg = left->as_float_reg();
    const FloatRegister rreg = right->as_float_reg();
    switch (code) {
      case lir_add: __ add_float(res, lreg, rreg); break;
      case lir_sub: __ sub_float(res, lreg, rreg); break;
      case lir_mul_strictfp: // fall through
      case lir_mul: __ mul_float(res, lreg, rreg); break;
      case lir_div_strictfp: // fall through
      case lir_div: __ div_float(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_fpu()) {
    assert(left->is_double_fpu(), "must be");
    assert(right->is_double_fpu(), "must be");
    const FloatRegister res = dest->as_double_reg();
    const FloatRegister lreg = left->as_double_reg();
    const FloatRegister rreg = right->as_double_reg();
    switch (code) {
      case lir_add: __ add_double(res, lreg, rreg); break;
      case lir_sub: __ sub_double(res, lreg, rreg); break;
      case lir_mul_strictfp: // fall through
      case lir_mul: __ mul_double(res, lreg, rreg); break;
      case lir_div_strictfp: // fall through
      case lir_div: __ div_double(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_abs:
      __ abs_double(dest->as_double_reg(), value->as_double_reg());
      break;
    case lir_sqrt:
      __ sqrt_double(dest->as_double_reg(), value->as_double_reg());
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  assert(dest->is_register(), "wrong items state");
  assert(left->is_register(), "wrong items state");

  if (dest->is_single_cpu()) {

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, rreg); break;
        case lir_logic_or:  __ orr_32(res, lreg, rreg); break;
        case lir_logic_xor: __ eor_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const uint c = (uint)right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, c); break;
        case lir_logic_or:  __ orr_32(res, lreg, c); break;
        case lir_logic_xor: __ eor_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(dest->is_double_cpu(), "should be");
    Register res_lo = dest->as_register_lo();

    assert(dest->type() == T_LONG, "unexpected result type");
    assert(left->type() == T_LONG, "unexpected left type");
    assert(right->type() == T_LONG, "unexpected right type");

    const Register res_hi = dest->as_register_hi();
    const Register lreg_lo = left->as_register_lo();
    const Register lreg_hi = left->as_register_hi();

    if (right->is_register()) {
      const Register rreg_lo = right->as_register_lo();
      const Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp; // Temp register helps to avoid overlap between result and input
      }
      switch (code) {
        case lir_logic_and:
          __ andr(res_lo, lreg_lo, rreg_lo);
          __ andr(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_logic_or:
          __ orr(res_lo, lreg_lo, rreg_lo);
          __ orr(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_logic_xor:
          __ eor(res_lo, lreg_lo, rreg_lo);
          __ eor(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
      move_regs(res_lo, dest->as_register_lo());
    } else {
      assert(right->is_constant(), "must be");
      const jint c_lo = (jint) right->as_constant_ptr()->as_jlong();
      const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32);
      // Case for logic_or from do_ClassIDIntrinsic()
      if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) {
        switch (code) {
          case lir_logic_and:
            __ andr(res_lo, lreg_lo, c_lo);
            __ mov(res_hi, 0);
            break;
          case lir_logic_or:
            __ orr(res_lo, lreg_lo, c_lo);
            break;
          case lir_logic_xor:
            __ eor(res_lo, lreg_lo, c_lo);
            break;
          default:
            ShouldNotReachHere();
        }
      } else if (code == lir_logic_and &&
                 c_hi == -1 &&
                 (AsmOperand::is_rotated_imm(c_lo) ||
                  AsmOperand::is_rotated_imm(~c_lo))) {
        // Another case which handles logic_and from do_ClassIDIntrinsic()
        if (AsmOperand::is_rotated_imm(c_lo)) {
          __ andr(res_lo, lreg_lo, c_lo);
        } else {
          __ bic(res_lo, lreg_lo, ~c_lo);
        }
        if (res_hi != lreg_hi) {
          __ mov(res_hi, lreg_hi);
        }
      } else {
        BAILOUT("64 bit constant cannot be inlined");
      }
    }
  }
}
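
// Note (illustration, not part of the original source): is_rotated_imm tests
// for an ARM "modified immediate", i.e. an 8-bit value rotated right by an
// even amount, so e.g. 0xff, 0x3fc and 0xff000000 are encodable while 0x101
// is not. A 64-bit constant is therefore only inlined above when c_hi is
// trivial (0, or -1 for AND) and c_lo (or ~c_lo, via BIC) fits this form;
// anything else bails out.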


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT: {
          const jint c = opr2->as_constant_ptr()->as_jint();
          if (Assembler::is_arith_imm_in_range(c)) {
            __ cmp_32(opr1->as_register(), c);
          } else if (Assembler::is_arith_imm_in_range(-c)) {
            __ cmn_32(opr1->as_register(), -c);
          } else {
            // This can happen when compiling lookupswitch
            __ mov_slow(Rtemp, c);
            __ cmp_32(opr1->as_register(), Rtemp);
          }
          break;
        }
        case T_OBJECT:
          assert(opr2->as_constant_ptr()->as_jobject() == NULL, "cannot handle otherwise");
          __ cmp(opr1->as_register(), 0);
          break;
        default:
          ShouldNotReachHere();
      }
    } else if (opr2->is_single_cpu()) {
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY, "incompatible type");
        __ cmpoop(opr1->as_register(), opr2->as_register());
      } else if (opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
        assert(opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatible type");
        __ cmp(opr1->as_register(), opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatible type");
        __ cmp_32(opr1->as_register(), opr2->as_register());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
      __ orrs(Rtemp, xlo, xhi);
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ teq(xhi, yhi);
        __ teq(xlo, ylo, eq);
      } else {
        __ subs(xlo, xlo, ylo);
        __ sbcs(xhi, xhi, yhi);
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_single_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
      __ cmp_zero_float(opr1->as_float_reg());
    } else {
      __ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
      __ cmp_zero_double(opr1->as_double_reg());
    } else {
      __ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
    }
  } else {
    ShouldNotReachHere();
  }
}
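
// Illustration (not part of the original source): the subs/sbcs pair above
// performs a full 64-bit subtraction purely for its flags, so the signed
// conditions lt/le/ge/gt used by the following branch are valid for the long
// comparison. eq/ne instead use the non-destructive teq/teq sequence, because
// the Z flag from sbcs alone would only describe the high word. Note that the
// subs/sbcs variant clobbers the left operand's registers (xlo/xhi).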

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register res = dst->as_register();
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    comp_op(lir_cond_unknown, left, right, op);
    __ fmstat();
    if (code == lir_ucmp_fd2i) { // unordered is less
      __ mvn(res, 0, lt);
      __ mov(res, 1, ge);
    } else {                     // unordered is greater
      __ mov(res, 1, cs);
      __ mvn(res, 0, cc);
    }
    __ mov(res, 0, eq);

  } else {
    assert(code == lir_cmp_l2i, "must be");

    Label done;
    const Register xlo = left->as_register_lo();
    const Register xhi = left->as_register_hi();
    const Register ylo = right->as_register_lo();
    const Register yhi = right->as_register_hi();
    __ cmp(xhi, yhi);
    __ mov(res, 1, gt);
    __ mvn(res, 0, lt);
    __ b(done, ne);
    __ subs(res, xlo, ylo);
    __ mov(res, 1, hi);
    __ mvn(res, 0, lo);
    __ bind(done);
  }
}
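
// Flag mapping used above (illustration, not part of the original source):
// after fmstat, a VFP compare leaves C set for "greater, equal or unordered"
// and makes lt true for "less or unordered". Hence lir_ucmp_fd2i (unordered
// is less) derives the -1 result from lt, lir_cmp_fd2i (unordered is greater)
// derives the +1 result from cs, and the trailing "mov res, 0, eq" handles
// equality in both cases.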


void LIR_Assembler::align_call(LIR_Code code) {
  // Not needed
}


void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
  int ret_addr_offset = __ patchable_call(op->addr(), rtype);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  add_call_info_here(op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
  bool near_range = __ cache_fully_reachable();
  address oop_address = pc();

  bool use_movw = VM_Version::supports_movw();

  // Ricklass may contain something that is not a metadata pointer, so
  // mov_metadata can't be used here.
  InlinedAddress value((address)Universe::non_oop_word());
  InlinedAddress addr(op->addr());
  if (use_movw) {
    __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
    __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
  } else {
    // Without movw/movt the value must be loaded PC-relative from an inline
    // literal; there is no relocation here, so there is no metadata table to
    // load it from. Use a b instruction rather than a bl, inline the constant
    // after the branch, use a PC-relative ldr to load the constant, and
    // arrange for the call to return after the constant(s).
    __ ldr_literal(Ricklass, value);
  }
  __ relocate(virtual_call_Relocation::spec(oop_address));
  if (near_range && use_movw) {
    __ bl(op->addr());
  } else {
    Label call_return;
    __ adr(LR, call_return);
    if (near_range) {
      __ b(op->addr());
    } else {
      __ indirect_jump(addr, Rtemp);
      __ bind_literal(addr);
    }
    if (!use_movw) {
      __ bind_literal(value);
    }
    __ bind(call_return);
  }
  add_call_info(code_offset(), op->info());
}
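
// Rough shape of the far-range path above (illustration, not part of the
// original source; exact encodings depend on indirect_jump):
//   ldr  Ricklass, =non_oop_word  @ PC-relative load of the inlined literal
//   adr  LR, call_return          @ return address set up manually
//   <indirect jump to op->addr() via Rtemp>
//   .word <op->addr()>            @ bound address literal
//   .word <non_oop_word>          @ bound value literal
// call_return: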


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}

void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    BAILOUT("static call stub overflow");
  }

  DEBUG_ONLY(int offset = code_offset();)

  InlinedMetadata metadata_literal(NULL);
  __ relocate(static_stub_Relocation::spec(call_pc));
  // If not a single instruction, NativeMovConstReg::next_instruction_address()
  // must jump over the whole following ldr_literal.
  // (See CompiledStaticCall::set_to_interpreted())
#ifdef ASSERT
  address ldr_site = __ pc();
#endif
  __ ldr_literal(Rmethod, metadata_literal);
  assert(nativeMovConstReg_at(ldr_site)->next_instruction_address() == __ pc(), "Fix ldr_literal or its parsing");
  bool near_range = __ cache_fully_reachable();
  InlinedAddress dest((address)-1);
  if (near_range) {
    address branch_site = __ pc();
    __ b(branch_site); // b to self maps to special NativeJump -1 destination
  } else {
    __ indirect_jump(dest, Rtemp);
  }
  __ bind_literal(metadata_literal); // includes spec_for_immediate reloc
  if (!near_range) {
    __ bind_literal(dest); // special NativeJump -1 destination
  }

  assert(code_offset() - offset <= call_stub_size(), "overflow");
  __ end_a_stub();
}
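
// Resulting stub layout (illustration, not part of the original source):
//   <static_stub_Relocation>
//   ldr  Rmethod, =NULL    @ patched to the callee Method* on resolution
//   b    .                 @ near range: branch-to-self encodes the -1 dest
//   .word <metadata>       @ bound literal(s)
// For a far-away code cache the branch-to-self is replaced by an indirect
// jump through an inlined -1 address literal that is patched the same way.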

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Rexception_obj, "must match");
  assert(exceptionPC->as_register() == Rexception_pc, "must match");
  info->add_register_oop(exceptionOop);

  Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
                               Runtime1::handle_exception_id :
                               Runtime1::handle_exception_nofpu_id;
  Label return_address;
  __ adr(Rexception_pc, return_address);
  __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type);
  __ bind(return_address);
  add_call_info_here(info); // for exception handler
}

void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Rexception_obj, "must match");
  __ b(_unwind_handler_entry);
}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  AsmShift shift = lsl;
  switch (code) {
    case lir_shl:  shift = lsl; break;
    case lir_shr:  shift = asr; break;
    case lir_ushr: shift = lsr; break;
    default: ShouldNotReachHere();
  }

  if (dest->is_single_cpu()) {
    __ andr(Rtemp, count->as_register(), 31);
    __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp));
  } else if (dest->is_double_cpu()) {
    Register dest_lo = dest->as_register_lo();
    Register dest_hi = dest->as_register_hi();
    Register src_lo = left->as_register_lo();
    Register src_hi = left->as_register_hi();
    Register Rcount = count->as_register();
    // Resolve possible register conflicts
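    // (Illustration, not part of the original source.) long_shift consumes
    // both source halves while producing both result halves, so a result
    // register must not alias a source half that is still needed, and the
    // shift count must not be clobbered by either result half; offending
    // registers are diverted through Rtemp and merged back by move_regs.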
    if (shift == lsl && dest_hi == src_lo) {
      dest_hi = Rtemp;
    } else if (shift != lsl && dest_lo == src_hi) {
      dest_lo = Rtemp;
    } else if (dest_lo == src_lo && dest_hi == src_hi) {
      dest_lo = Rtemp;
    } else if (dest_lo == Rcount || dest_hi == Rcount) {
      Rcount = Rtemp;
    }
    __ andr(Rcount, count->as_register(), 63);
    __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount);
    move_regs(dest_lo, dest->as_register_lo());
    move_regs(dest_hi, dest->as_register_hi());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  AsmShift shift = lsl;
  switch (code) {
    case lir_shl:  shift = lsl; break;
    case lir_shr:  shift = asr; break;
    case lir_ushr: shift = lsr; break;
    default: ShouldNotReachHere();
  }

  if (dest->is_single_cpu()) {
    count &= 31;
    if (count != 0) {
      __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count));
    } else {
      move_regs(left->as_register(), dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    count &= 63;
    if (count != 0) {
      Register dest_lo = dest->as_register_lo();
      Register dest_hi = dest->as_register_hi();
      Register src_lo = left->as_register_lo();
      Register src_hi = left->as_register_hi();
      // Resolve possible register conflicts
      if (shift == lsl && dest_hi == src_lo) {
        dest_hi = Rtemp;
      } else if (shift != lsl && dest_lo == src_hi) {
        dest_lo = Rtemp;
      }
      __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count);
      move_regs(dest_lo, dest->as_register_lo());
      move_regs(dest_hi, dest->as_register_hi());
    } else {
      __ long_move(dest->as_register_lo(), dest->as_register_hi(),
                   left->as_register_lo(), left->as_register_hi());
    }
  } else {
    ShouldNotReachHere();
  }
}


// Saves 4 given registers in reserved argument area.
void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) {
  verify_reserved_argument_area_size(4);
  __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4));
}

// Restores 4 given registers from reserved argument area.
void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) {
  __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback);
}


void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst = op->dst()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp2 = Rtemp;

  assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption");

  __ resolve(ACCESS_READ, src);
  __ resolve(ACCESS_WRITE, dst);

  CodeStub* stub = op->stub();

  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // If we don't know anything, or it's an object array, just go through the generic arraycopy
  if (default_type == NULL) {

    // save arguments, because they will be killed by a runtime call
    save_in_reserved_area(R0, R1, R2, R3);

    // pass length argument on SP[0]
    __ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != NULL, "generic arraycopy stub required");
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2);
    }
#endif // !PRODUCT
    // the stub is in the code cache so close enough
    __ call(copyfunc_addr, relocInfo::runtime_call_type);

    __ add(SP, SP, 2*wordSize);

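    // The generic arraycopy stub returns 0 in R0 on success, or
    // ~(number of elements already copied) on a partial copy; the mvn below
    // recovers that count so the positions and length can be advanced before
    // falling back to the slow path.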
    __ cbz_32(R0, *stub->continuation());

    __ mvn_32(tmp, R0);
    restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only
    __ sub_32(length, length, tmp);
    __ add_32(src_pos, src_pos, tmp);
    __ add_32(dst_pos, dst_pos, tmp);

    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(),
         "must be true at this point");
  int elem_size = type2aelembytes(basic_type);
  int shift = exact_log2(elem_size);

  // Check for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    if (flags & LIR_OpArrayCopy::dst_null_check) {
      __ cmp(src, 0);
      __ cond_cmp(dst, 0, ne); // make one instruction shorter if both checks are needed
      __ b(*stub->entry(), eq);
    } else {
      __ cbz(src, *stub->entry());
    }
  } else if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // If the compiler was not able to prove that the exact type of the source
  // or the destination of the arraycopy is an array type, check at runtime
  // that it is not an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ mov_slow(tmp, Klass::_lh_neutral_value);
      __ cmp_32(tmp2, tmp);
      __ b(*stub->entry(), ge);
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ mov_slow(tmp, Klass::_lh_neutral_value);
      __ cmp_32(tmp2, tmp);
      __ b(*stub->entry(), ge);
    }
  }

  // Check if negative
  const int all_positive_checks = LIR_OpArrayCopy::src_pos_positive_check |
                                  LIR_OpArrayCopy::dst_pos_positive_check |
                                  LIR_OpArrayCopy::length_positive_check;
  switch (flags & all_positive_checks) {
    case LIR_OpArrayCopy::src_pos_positive_check:
      __ branch_if_negative_32(src_pos, *stub->entry());
      break;
    case LIR_OpArrayCopy::dst_pos_positive_check:
      __ branch_if_negative_32(dst_pos, *stub->entry());
      break;
    case LIR_OpArrayCopy::length_positive_check:
      __ branch_if_negative_32(length, *stub->entry());
      break;
    case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::dst_pos_positive_check:
      __ branch_if_any_negative_32(src_pos, dst_pos, tmp, *stub->entry());
      break;
    case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
      __ branch_if_any_negative_32(src_pos, length, tmp, *stub->entry());
      break;
    case LIR_OpArrayCopy::dst_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
      __ branch_if_any_negative_32(dst_pos, length, tmp, *stub->entry());
      break;
    case all_positive_checks:
      __ branch_if_any_negative_32(src_pos, dst_pos, length, tmp, *stub->entry());
      break;
    default:
      assert((flags & all_positive_checks) == 0, "the last option");
  }

  // Range checks
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ ldr_s32(tmp2, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ add_32(tmp, src_pos, length);
    __ cmp_32(tmp, tmp2);
    __ b(*stub->entry(), hi);
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ ldr_s32(tmp2, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ add_32(tmp, dst_pos, length);
    __ cmp_32(tmp, tmp2);
    __ b(*stub->entry(), hi);
  }

  // Check if src and dst are of the same type
  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        // We don't need decode because we just need to compare
        __ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
        __ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
        __ cmp_32(tmp, tmp2);
      } else {
        __ load_klass(tmp, src);
        __ load_klass(tmp2, dst);
        __ cmp(tmp, tmp2);
      }
      __ b(*stub->entry(), ne);
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      __ load_klass(tmp, src);
      __ load_klass(tmp2, dst);

      // We are at a call, so all live registers are saved before we
      // get here
      assert_different_registers(tmp, tmp2, R6, altFP_7_11);

      __ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);

      __ mov(R6, R0);
      __ mov(altFP_7_11, R1);
      __ mov(R0, tmp);
      __ mov(R1, tmp2);
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
      __ cmp_32(R0, 0);
      __ mov(R0, R6);
      __ mov(R1, altFP_7_11);

      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst, so we have to do a
        // per-element check.

        __ b(cont, ne);

        __ bind(slow);

        int mask = LIR_OpArrayCopy::src_objarray | LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // At least one of the two arrays is known to be an object array;
          // check at runtime that the other one is an object array too.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());

          __ ldr_u32(tmp2, Address(tmp, lh_offset));

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ mov_slow(tmp, objArray_lh);
          __ cmp_32(tmp, tmp2);
          __ b(*stub->entry(), ne);
        }

        save_in_reserved_area(R0, R1, R2, R3);

        Register src_ptr = R0;
        Register dst_ptr = R1;
        Register len = R2;
        Register chk_off = R3;
        Register super_k = tmp;

        __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

        __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
        __ load_klass(tmp, dst);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        int sco_offset = in_bytes(Klass::super_check_offset_offset());

        __ ldr(super_k, Address(tmp, ek_offset));

        __ mov(len, length);
        __ ldr_u32(chk_off, Address(super_k, sco_offset));
        __ push(super_k);

        __ call(copyfunc_addr, relocInfo::runtime_call_type);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz_32(R0, failed);
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
          __ bind(failed);
        }
#endif // !PRODUCT

        __ add(SP, SP, wordSize); // Drop super_k argument

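        // checkcast_arraycopy follows the same convention as the generic
        // stub: R0 == 0 on success, otherwise ~(number of elements copied),
        // used below to advance the positions for the slow path.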
        __ cbz_32(R0, *stub->continuation());
        __ mvn_32(tmp, R0);

        // load saved arguments in slow case only
        restore_from_reserved_area(R0, R1, R2, R3);

        __ sub_32(length, length, tmp);
        __ add_32(src_pos, src_pos, tmp);
        __ add_32(dst_pos, dst_pos, tmp);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
        }
#endif

        __ b(*stub->entry());

        __ bind(cont);
      } else {
        __ b(*stub->entry(), eq);
        __ bind(cont);
      }
    }
  }

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    __ inc_counter(counter, tmp, tmp2);
  }
#endif // !PRODUCT

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  Register src_ptr = R0;
  Register dst_ptr = R1;
  Register len = R2;

  __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

  __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);

  __ mov(len, length);

  __ call(entry, relocInfo::runtime_call_type);

  __ bind(*stub->continuation());
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    AsmCondition acond = al;
    switch (op->condition()) {
      case lir_cond_equal:        acond = eq; break;
      case lir_cond_notEqual:     acond = ne; break;
      case lir_cond_less:         acond = lt; break;
      case lir_cond_lessEqual:    acond = le; break;
      case lir_cond_greaterEqual: acond = ge; break;
      case lir_cond_greater:      acond = gt; break;
      case lir_cond_aboveEqual:   acond = hs; break;
      case lir_cond_belowEqual:   acond = ls; break;
      default: ShouldNotReachHere();
    }
    __ b(ok, acond);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif // ASSERT

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  fatal("CRC32 intrinsic is not implemented on this platform");
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_pointer_register();
  Register hdr = op->hdr_opr()->as_pointer_register();
  Register lock = op->lock_opr()->as_pointer_register();
  Register tmp = op->scratch_opr()->is_illegal() ? noreg :
                 op->scratch_opr()->as_pointer_register();

  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, tmp, *op->stub()->entry());
  } else {
    ShouldNotReachHere();
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  assert(op->tmp1()->is_register(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_pointer_register();
  assert_different_registers(mdo, tmp1);
  __ mov_metadata(mdo, md->constant_encoding());
  int mdo_offset_bias = 0;
  int max_offset = 4096;
  if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) {
    // The offset is large, so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                         VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ldr(tmp1, data_addr);
          __ add(tmp1, tmp1, DataLayout::counter_increment);
          __ str(tmp1, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                            mdo_offset_bias);
          __ mov_metadata(tmp1, known_klass->constant_encoding());
          __ str(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ldr(tmp1, data_addr);
          __ add(tmp1, tmp1, DataLayout::counter_increment);
          __ str(tmp1, data_addr);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ldr(tmp1, counter_addr);
      __ add(tmp1, tmp1, DataLayout::counter_increment);
      __ str(tmp1, counter_addr);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ldr(tmp1, counter_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, counter_addr);
  }
}
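
// Background (not part of the original source): VirtualCallData keeps a small
// fixed table of (receiver klass, count) rows, so the code above either bumps
// the matching row, claims the first empty row when the receiver is statically
// known, or, via type_profile_helper, falls back to bumping the total counter
// once the table is full (the polymorphic case).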

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  fatal("Type profiling not implemented on this platform");
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp());
}


void LIR_Assembler::align_backward_branch_target() {
  // Some ARM processors do better with 8-byte branch target alignment
  __ align(8);
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    assert(dest->type() == T_INT, "unexpected result type");
    assert(left->type() == T_INT, "unexpected left type");
    __ neg_32(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    Register dest_lo = dest->as_register_lo();
    Register dest_hi = dest->as_register_hi();
    Register src_lo = left->as_register_lo();
    Register src_hi = left->as_register_hi();
    if (dest_lo == src_hi) {
      dest_lo = Rtemp;
    }
    __ rsbs(dest_lo, src_lo, 0);
    __ rsc(dest_hi, src_hi, 0);
    move_regs(dest_lo, dest->as_register_lo());
  } else if (left->is_single_fpu()) {
    __ neg_float(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ neg_double(dest->as_double_reg(), left->as_double_reg());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(patch_code == lir_patch_none, "Patch code not supported");
  LIR_Address* addr = addr_opr->as_address_ptr();
  if (addr->index()->is_illegal()) {
    jint c = addr->disp();
    if (!Assembler::is_arith_imm_in_range(c)) {
      BAILOUT("illegal arithmetic operand");
    }
    __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c);
  } else {
    assert(addr->disp() == 0, "cannot handle otherwise");
    __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(),
           AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale()));
  }
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(dest);
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert((src->is_double_cpu() && dest->is_address()) ||
         (src->is_address() && dest->is_double_cpu()),
         "Simple move_op is called for all other cases");

  int null_check_offset;
  if (dest->is_address()) {
    // Store
    const LIR_Address* addr = dest->as_address_ptr();
    const Register src_lo = src->as_register_lo();
    const Register src_hi = src->as_register_hi();
    assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");

    if (src_lo < src_hi) {
      null_check_offset = __ offset();
      __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi));
    } else {
      assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register");
      __ mov(Rtemp, src_hi);
      null_check_offset = __ offset();
      __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp));
    }
  } else {
    // Load
    const LIR_Address* addr = src->as_address_ptr();
    const Register dest_lo = dest->as_register_lo();
    const Register dest_hi = dest->as_register_hi();
    assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");

    null_check_offset = __ offset();
    if (dest_lo < dest_hi) {
      __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi));
    } else {
      assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register");
      __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp));
      __ mov(dest_hi, Rtemp);
    }
  }

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_offset, info);
  }
}
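
// Why the register order matters above (not part of the original source):
// LDM/STM transfer the registers of a set in ascending register-number order,
// so a (lo, hi) pair can be moved directly only when the low word lives in
// the lower-numbered register; otherwise the high word is routed through
// Rtemp, which has a higher encoding than any allocatable register.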


void LIR_Assembler::membar() {
  __ membar(MacroAssembler::StoreLoad, Rtemp);
}

void LIR_Assembler::membar_acquire() {
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
}

void LIR_Assembler::membar_release() {
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
}

void LIR_Assembler::membar_loadload() {
  __ membar(MacroAssembler::LoadLoad, Rtemp);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore, Rtemp);
}

void LIR_Assembler::membar_loadstore() {
  __ membar(MacroAssembler::LoadStore, Rtemp);
}

void LIR_Assembler::membar_storeload() {
  __ membar(MacroAssembler::StoreLoad, Rtemp);
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  // Not used on ARM
  Unimplemented();
}

void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  const int inst_length = inst->length();
  for (int i = 0; i < inst_length; i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cmp: {
        // Replace:
        //   cmp rX, y
        //   cmove [EQ] y, z, rX
        // with
        //   cmp rX, y
        //   cmove [EQ] illegalOpr, z, rX
        //
        // or
        //   cmp rX, y
        //   cmove [NE] z, y, rX
        // with
        //   cmp rX, y
        //   cmove [NE] z, illegalOpr, rX
        //
        // moves from illegalOpr should be removed when converting LIR to native assembly

        LIR_Op2* cmp = op->as_Op2();
        assert(cmp != NULL, "cmp LIR instruction is not an op2");

        if (i + 1 < inst_length) {
          LIR_Op2* cmove = inst->at(i + 1)->as_Op2();
          if (cmove != NULL && cmove->code() == lir_cmove) {
            LIR_Opr cmove_res = cmove->result_opr();
            bool res_is_op1 = cmove_res == cmp->in_opr1();
            bool res_is_op2 = cmove_res == cmp->in_opr2();
            LIR_Opr cmp_res, cmp_arg;
            if (res_is_op1) {
              cmp_res = cmp->in_opr1();
              cmp_arg = cmp->in_opr2();
            } else if (res_is_op2) {
              cmp_res = cmp->in_opr2();
              cmp_arg = cmp->in_opr1();
            } else {
              cmp_res = LIR_OprFact::illegalOpr;
              cmp_arg = LIR_OprFact::illegalOpr;
            }

            if (cmp_res != LIR_OprFact::illegalOpr) {
              LIR_Condition cond = cmove->condition();
              if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
                cmove->set_in_opr1(LIR_OprFact::illegalOpr);
              } else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
                cmove->set_in_opr2(LIR_OprFact::illegalOpr);
              }
            }
          }
        }
        break;
      }

      default:
        break;
    }
  }
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(src->is_address(), "sanity");
  Address addr = as_Address(src->as_address_ptr());

  if (code != lir_xchg) {
    assert(!data->is_oop(), "xadd for oops");
  }

  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);

  Label retry;
  __ bind(retry);

  if (data->type() == T_INT || data->is_oop()) {
    Register dst = dest->as_register();
    Register new_val = noreg;
    __ ldrex(dst, addr);
    if (code == lir_xadd) {
      Register tmp_reg = tmp->as_register();
      if (data->is_constant()) {
        assert_different_registers(dst, tmp_reg);
        __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
      } else {
        assert_different_registers(dst, tmp_reg, data->as_register());
        __ add_32(tmp_reg, dst, data->as_register());
      }
      new_val = tmp_reg;
    } else {
      if (UseCompressedOops && data->is_oop()) {
        new_val = tmp->as_pointer_register();
      } else {
        new_val = data->as_register();
      }
      assert_different_registers(dst, new_val);
    }
    __ strex(Rtemp, new_val, addr);

  } else if (data->type() == T_LONG) {
    Register dst_lo = dest->as_register_lo();
    Register new_val_lo = noreg;
    Register dst_hi = dest->as_register_hi();

    assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
    assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");

    __ ldrexd(dst_lo, addr);
    if (code == lir_xadd) {
      Register tmp_lo = tmp->as_register_lo();
      Register tmp_hi = tmp->as_register_hi();

      assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
      assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");

      if (data->is_constant()) {
        jlong c = data->as_constant_ptr()->as_jlong();
        assert((jlong)((jint)c) == c, "overflow");
        assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
        __ adds(tmp_lo, dst_lo, (jint)c);
        __ adc(tmp_hi, dst_hi, 0);
      } else {
        Register data_lo = data->as_register_lo();
        Register data_hi = data->as_register_hi();
        assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, data_lo, data_hi);
        __ adds(tmp_lo, dst_lo, data_lo);
        __ adc(tmp_hi, dst_hi, data_hi);
      }
      new_val_lo = tmp_lo;
    } else {
      new_val_lo = data->as_register_lo();
      Register new_val_hi = data->as_register_hi();

      assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi);
      assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair");
      assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair");
    }
    __ strexd(Rtemp, new_val_lo, addr);
  } else {
    ShouldNotReachHere();
  }

  __ cbnz_32(Rtemp, retry);
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}
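
// Shape of the emitted LL/SC loop (illustration, not part of the original
// source), shown here for a 32-bit xadd:
//   <release barrier>
// retry:
//   ldrex  dst, [addr]         @ load-exclusive the old value
//   add    tmp, dst, data      @ compute the new value
//   strex  Rtemp, tmp, [addr]  @ Rtemp == 0 iff the store-exclusive succeeded
//   cbnz   Rtemp, retry        @ lost the reservation - try again
//   <full barrier>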

#undef __