/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_arm.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

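// On ARM any general-purpose register can hold a byte value, so loading a
// byte-sized item needs no special register constraints.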
void LIRItem::load_byte_item() {
  load_item();
}

void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() {
  return FrameMap::Exception_oop_opr;
}

LIR_Opr LIRGenerator::exceptionPcOpr() {
  return FrameMap::Exception_pc_opr;
}

LIR_Opr LIRGenerator::syncLockOpr() {
  return new_register(T_INT);
}

LIR_Opr LIRGenerator::syncTempOpr() {
  return new_register(T_OBJECT);
}

LIR_Opr LIRGenerator::getThreadTemp() {
  return LIR_OprFact::illegalOpr;
}

LIR_Opr LIRGenerator::atomicLockOpr() {
  return LIR_OprFact::illegalOpr;
}

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::Int_result_opr;    break;
    case objectTag:  opr = FrameMap::Object_result_opr; break;
    case longTag:    opr = FrameMap::Long_result_opr;   break;
    case floatTag:   opr = FrameMap::Float_result_opr;  break;
    case doubleTag:  opr = FrameMap::Double_result_opr; break;
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_arith_imm_in_range(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


static LIR_Opr make_constant(BasicType type, jlong c) {
  switch (type) {
    case T_ADDRESS:
    case T_OBJECT:  return LIR_OprFact::intptrConst(c);
    case T_LONG:    return LIR_OprFact::longConst(c);
    case T_INT:     return LIR_OprFact::intConst(c);
    default: ShouldNotReachHere();
    return LIR_OprFact::intConst(-1);
  }
}

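// Adds a 32-bit constant that may not fit a single ARM rotated immediate to 'src'.
// The constant is split into at most four byte-sized chunks (each representable as
// an 8-bit value rotated by an even amount), and one ADD is emitted per non-zero chunk.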
void LIRGenerator::add_large_constant(LIR_Opr src, int c, LIR_Opr dest) {
  assert(c != 0, "must be");
  // Find first non-zero bit
  int shift = 0;
  while ((c & (3 << shift)) == 0) {
    shift += 2;
  }
  // Add the least significant part of the constant
  int mask = 0xff << shift;
  __ add(src, LIR_OprFact::intConst(c & mask), dest);
  // Add up to 3 other parts of the constant;
  // each of them can be represented as rotated_imm
  if (c & (mask << 8)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 8)), dest);
  }
  if (c & (mask << 16)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 16)), dest);
  }
  if (c & (mask << 24)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 24)), dest);
  }
}

static LIR_Address* make_address(LIR_Opr base, LIR_Opr index, LIR_Address::Scale scale, BasicType type) {
  return new LIR_Address(base, index, scale, 0, type);
}

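// Builds an LIR_Address from base/index/shift/displacement, making sure the
// displacement fits the addressing-mode range of the access type; out-of-range
// displacements are folded into a temporary base register via add_large_constant().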
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

  if (base->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base, tmp);
    base = tmp;
  }
  if (index != LIR_OprFact::illegalOpr && index->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, index, tmp);
    index = tmp;
  }
  // At this point base and index should be all ints and not constants
  assert(base->is_single_cpu() && !base->is_constant(), "base should be a non-constant int");
  assert(index->is_illegal() || (index->type() == T_INT && !index->is_constant()), "index should be a non-constant int");

  int max_disp;
  bool disp_is_in_range;
  bool embedded_shift;

  switch (type) {
    case T_BYTE:
    case T_SHORT:
    case T_CHAR:
      max_disp = 256;          // ldrh, ldrsb encoding has 8-bit offset
      embedded_shift = false;
      break;
    case T_FLOAT:
    case T_DOUBLE:
      max_disp = 1024;         // flds, fldd have 8-bit offset multiplied by 4
      embedded_shift = false;
      break;
    case T_LONG:
      max_disp = 4096;
      embedded_shift = false;
      break;
    default:
      max_disp = 4096;         // ldr, ldrb allow 12-bit offset
      embedded_shift = true;
  }

  disp_is_in_range = (-max_disp < disp && disp < max_disp);

  if (index->is_register()) {
    LIR_Opr tmp = new_pointer_register();
    if (!disp_is_in_range) {
      add_large_constant(base, disp, tmp);
      base = tmp;
      disp = 0;
    }
    LIR_Address* addr = make_address(base, index, (LIR_Address::Scale)shift, type);
    if (disp == 0 && embedded_shift) {
      // can use ldr/str instruction with register index
      return addr;
    } else {
      LIR_Opr tmp = new_pointer_register();
      __ add(base, LIR_OprFact::address(addr), tmp); // add with shifted/extended register
      return new LIR_Address(tmp, disp, type);
    }
  }

  // If the displacement is too large to be encoded directly in the LDR instruction,
  // materialize the excess with an additional sequence of ADD instructions.
  int excess_disp = disp & ~(max_disp - 1);
  if (excess_disp != 0) {
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(base, excess_disp, tmp);
    base = tmp;
  }
  return new LIR_Address(base, disp & (max_disp - 1), type);
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type) {
  int base_offset = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);

  if (index_opr->is_constant()) {
    int offset = base_offset + index_opr->as_constant_ptr()->as_jint() * elem_size;
    return generate_address(array_opr, offset, type);
  } else {
    assert(index_opr->is_register(), "must be");
    int scale = exact_log2(elem_size);
    return generate_address(array_opr, index_opr, scale, base_offset, type);
  }
}

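// Materializes an integer constant: values encodable as an ARM rotated immediate are
// returned as a constant operand, anything else is moved into a fresh register.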
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  assert(type == T_LONG || type == T_INT, "should be");
  LIR_Opr r = make_constant(type, x);
  bool imm_in_range = AsmOperand::is_rotated_imm(x);
  if (!imm_in_range) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, make_constant(addr->type(), step), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, T_INT), FrameMap::LR_opr, info);
  __ cmp(condition, FrameMap::LR_opr, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, type), FrameMap::LR_opr, info);
  __ cmp(condition, reg, FrameMap::LR_opr);
}

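// Strength-reduces a multiplication by a constant of the form 2^n + 1 or 2^n - 1 into a
// single add/rsb with a shifted-register operand; returns false if the constant has
// neither form, in which case the caller falls back to a regular multiply.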
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
    LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c + 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ sub(LIR_OprFact::address(addr), left, result); // rsb with shifted register
    return true;
  } else if (is_power_of_2(c - 1)) {
    LIR_Address::Scale scale = (LIR_Address::Scale) log2_intptr(c - 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ add(left, LIR_OprFact::address(addr), result); // add with shifted register
    return true;
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  assert(item->type() == T_INT, "other types are not expected");
  __ store(item, new LIR_Address(FrameMap::SP_opr, in_bytes(offset_from_sp), item->type()));
}

void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
  assert(CardTable::dirty_card_val() == 0,
         "Cannot use the register containing the card table base address directly");
  if ((ci_card_table_address_as<intx>() & 0xff) == 0) {
    // If the card table base address is aligned to 256 bytes, we can use the register
    // that contains the card_table_base_address.
    __ move(value, card_addr);
  } else {
    // Otherwise we need to create a register containing that value.
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
}

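// Card-table post barrier: computes the card address as
// card_table_base + (addr >> card_shift) and marks the card dirty, optionally skipping
// the store when the card is already dirty (UseCondCardMark).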
void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
  assert(addr->is_register(), "must be a register at this point");

  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
  CardTable* ct = ctbs->card_table();

  LIR_Opr tmp = FrameMap::LR_ptr_opr;

  bool load_card_table_base_const = VM_Version::supports_movw();
  if (load_card_table_base_const) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

  // Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
  // byte instruction does not support the addressing mode we need.
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
  if (UseCondCardMark) {
    if (ct->scanned_concurrently()) {
      __ membar_storeload();
    }
    LIR_Opr cur_value = new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
    __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
    set_card(tmp, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    if (ct->scanned_concurrently()) {
      __ membar_storestore();
    }
    set_card(tmp, card_addr);
  }
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();
  set_no_result(x);

  LIR_Opr lock = new_pointer_register();
  LIR_Opr hdr = new_pointer_register();

  // Need a scratch register for biased locking on arm
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_pointer_register();
  } else {
    scratch = atomicLockOpr();
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();
  set_no_result(x);

  LIR_Opr obj_temp = new_pointer_register();
  LIR_Opr lock = new_pointer_register();
  LIR_Opr hdr = new_pointer_register();

  monitor_exit(obj_temp, lock, hdr, atomicLockOpr(), x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
#ifdef __SOFTFP__
  address runtime_func = NULL;
  ValueTag tag = x->type()->tag();
  if (tag == floatTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fneg);
  } else if (tag == doubleTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dneg);
  }
  if (runtime_func != NULL) {
    set_result(x, call_runtime(x->x(), runtime_func, x->type(), NULL));
    return;
  }
#endif // __SOFTFP__
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for _fadd, _fmul, _fsub, _fdiv, _frem
//     _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_frem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
#ifdef __SOFTFP__
    // Call function compiled with -msoft-float.

    // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.

    case Bytecodes::_fadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc);
      break;
    case Bytecodes::_fmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fmul);
      break;
    case Bytecodes::_fsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc);
      break;
    case Bytecodes::_fdiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fdiv);
      break;
    case Bytecodes::_dadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc);
      break;
    case Bytecodes::_dmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dmul);
      break;
    case Bytecodes::_dsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc);
      break;
    case Bytecodes::_ddiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_ddiv);
      break;
    default:
      ShouldNotReachHere();
#else // __SOFTFP__
    default: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
      return;
    }
#endif // __SOFTFP__
  }

  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
}

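// Emits an explicit compare of the divisor against zero and a branch to a DivByZeroStub
// (which throws ArithmeticException); the division itself does not trap on a zero divisor.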
void LIRGenerator::make_div_by_zero_check(LIR_Opr right_arg, BasicType type, CodeEmitInfo* info) {
  assert(right_arg->is_register(), "must be");
  __ cmp(lir_cond_equal, right_arg, make_constant(type, 0));
  __ branch(lir_cond_equal, type, new DivByZeroStub(info));
}


// for _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  CodeEmitInfo* info = NULL;
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    info = state_for(x);
  }

  switch (x->op()) {
    case Bytecodes::_ldiv:
    case Bytecodes::_lrem: {
      LIRItem right(x->y(), this);
      right.load_item();
      make_div_by_zero_check(right.result(), T_LONG, info);
    }
    // Fall through
    case Bytecodes::_lmul: {
      address entry;
      switch (x->op()) {
        case Bytecodes::_lrem:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
          break;
        case Bytecodes::_ldiv:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
          break;
        case Bytecodes::_lmul:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
          break;
        default:
          ShouldNotReachHere();
          return;
      }
      LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case Bytecodes::_ladd:
    case Bytecodes::_lsub: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Test if instr is commutative and if we should swap
  if (x->is_commutative() && left.is_constant()) {
    left_arg = &right;
    right_arg = &left;
  }

  if (is_div_rem) {
    CodeEmitInfo* info = state_for(x);
    if (x->op() == Bytecodes::_idiv && right_arg->is_constant() && is_power_of_2(right_arg->get_jint_constant())) {
      left_arg->load_item();
      right_arg->dont_load_item();
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      LIR_Opr result = rlock_result(x);
      __ idiv(left_arg->result(), right_arg->result(), result, tmp, info);
    } else {
      left_arg->load_item_force(FrameMap::R0_opr);
      right_arg->load_item_force(FrameMap::R2_opr);
      LIR_Opr tmp = FrameMap::R1_opr;
      LIR_Opr result = rlock_result(x);
      LIR_Opr out_reg;
      if (x->op() == Bytecodes::_irem) {
        out_reg = FrameMap::R0_opr;
        __ irem(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      } else { // (x->op() == Bytecodes::_idiv)
        out_reg = FrameMap::R1_opr;
        __ idiv(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      }
      __ move(out_reg, result);
    }

  } else {
    left_arg->load_item();
    if (x->op() == Bytecodes::_imul && right_arg->is_constant()) {
      jint c = right_arg->get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_nonconstant();
    }
    rlock_result(x);
    assert(right_arg->is_constant() || right_arg->is_register(), "wrong state of right");
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), NULL);
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
    default: ShouldNotReachHere(); return;
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  if (value.type()->is_long()) {
    count.set_destroys_register();
  }

  if (count.is_constant()) {
    assert(count.type()->as_IntConstant() != NULL, "should be");
    count.dont_load_item();
  } else {
    count.load_item();
  }
  value.load_item();

  LIR_Opr res = rlock_result(x);
  shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  right.load_nonconstant();

  logic_op(x->op(), rlock_result(x), left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
#ifdef __SOFTFP__
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_fcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl);
      break;
    case Bytecodes::_fcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg);
      break;
    case Bytecodes::_dcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl);
      break;
    case Bytecodes::_dcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg);
      break;
    case Bytecodes::_lcmp: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_nonconstant();
      LIR_Opr reg = rlock_result(x);
      __ lcmp2int(left.result(), right.result(), reg);
      return;
    }
    default:
      ShouldNotReachHere();
  }
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
#else // __SOFTFP__
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();

  right.load_nonconstant();

  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
#endif // __SOFTFP__
}

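// Atomic compare-and-swap: the expected and new values are loaded into registers and a
// LIR cas_* operation is emitted; the 64-bit case needs an extra long temporary register.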
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
  LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (type == T_OBJECT || type == T_ARRAY) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp1, result);
  } else if (type == T_LONG) {
    tmp1 = new_register(T_LONG);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp2, result);
  } else {
    ShouldNotReachHere();
  }
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = (UseCompressedOops && is_oop) ? new_pointer_register() : LIR_OprFact::illegalOpr;
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG), "unexpected type");
  LIR_Opr tmp = new_register(type);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

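// Math intrinsics: abs and sqrt are expanded inline when hardware FP is available (and go
// through SharedRuntime under __SOFTFP__); the transcendental functions are always runtime calls.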
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  address runtime_func;
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dabs);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ abs(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsqrt: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ sqrt(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsin:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      break;
    case vmIntrinsics::_dcos:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      break;
    case vmIntrinsics::_dtan:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      break;
    case vmIntrinsics::_dlog:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      break;
    case vmIntrinsics::_dlog10:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      break;
    case vmIntrinsics::_dexp:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      break;
    case vmIntrinsics::_dpow:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      break;
    default:
      ShouldNotReachHere();
      return;
  }

  LIR_Opr result;
  if (x->number_of_arguments() == 1) {
    result = call_runtime(x->argument_at(0), runtime_func, x->type(), NULL);
  } else {
    assert(x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow, "unexpected intrinsic");
    result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_func, x->type(), NULL);
  }
  set_result(x, result);
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  fatal("FMA intrinsic is not implemented on this platform");
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x, x->state());
  assert(x->number_of_arguments() == 5, "wrong type");
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // We put arguments into the same registers which are used for a Java call.
  // Note: we used fixed registers for all arguments because all registers
  // are caller-saved, so register allocator treats them all as used.
  src.load_item_force    (FrameMap::R0_oop_opr);
  src_pos.load_item_force(FrameMap::R1_opr);
  dst.load_item_force    (FrameMap::R2_oop_opr);
  dst_pos.load_item_force(FrameMap::R3_opr);
  length.load_item_force (FrameMap::R4_opr);
  LIR_Opr tmp =          (FrameMap::R5_opr);
  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(),
               tmp, expected_type, flags, info);
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  fatal("CRC32 intrinsic is not implemented on this platform");
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

void LIRGenerator::do_Convert(Convert* x) {
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_l2f:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
      break;
    case Bytecodes::_l2d:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
      break;
    case Bytecodes::_f2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
      break;
    case Bytecodes::_d2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
      break;
#ifdef __SOFTFP__
    case Bytecodes::_f2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2d);
      break;
    case Bytecodes::_d2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_d2f);
      break;
    case Bytecodes::_i2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2f);
      break;
    case Bytecodes::_i2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2d);
      break;
    case Bytecodes::_f2i:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2iz);
      break;
    case Bytecodes::_d2i:
      // This is implemented in hard float in assembler on arm but a call
      // on other platforms.
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
      break;
#endif // __SOFTFP__
    default: {
      LIRItem value(x->value(), this);
      value.load_item();
      LIR_Opr reg = rlock_result(x);
      __ convert(x->op(), value.result(), reg, NULL);
      return;
    }
  }

  LIR_Opr result = call_runtime(x->value(), runtime_func, x->type(), NULL);
  set_result(x, result);
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;

  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3,
               LIR_OprFact::illegalOpr, klass_reg, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr len = length.result();

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewTypeArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  BasicType elem_type = x->elt_type();
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr len = length.result();

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewObjectArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

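// Multi-dimensional array allocation: the dimension sizes are stored as outgoing stack
// parameters and Runtime1::new_multi_array_id is called with the klass, the rank and a
// pointer (SP) to that dimension array.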
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Need to get the info before, as the items may become invalid through item_free
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();
    LIR_Opr sz = size->result();
    assert(sz->type() == T_INT, "should be");
    store_stack_parameter(sz, in_ByteSize(i * BytesPerInt));
  }

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R2_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::SP_opr;
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr, reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();

  CodeEmitInfo* info_for_exception =
    (x->needs_exception_state() ? state_for(x) :
                                  state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  }

  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(),
               info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}


#ifdef __SOFTFP__
// Turn operator if (f <op> g) into runtime call:
//     call _aeabi_fcmp<op>(f, g)
//     cmp(eq, 1)
//     branch(eq, true path).
void LIRGenerator::do_soft_float_compare(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();
  address runtime_func;
  // An unordered comparison would get the wrong answer from the aeabi functions,
  // which return false for unordered operands.
  bool unordered_is_true = x->unordered_is_true();
  // reverse of condition for ne
  bool compare_to_zero = false;
  switch (lir_cond(cond)) {
    case lir_cond_notEqual:
      compare_to_zero = true;  // fall through
    case lir_cond_equal:
      runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmpeq):
          CAST_FROM_FN_PTR(address, __aeabi_dcmpeq);
      break;
    case lir_cond_less:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmplt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmplt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmplt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmplt);
      }
      break;
    case lir_cond_lessEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmple):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmple);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmple):
            CAST_FROM_FN_PTR(address, __aeabi_dcmple);
      }
      break;
    case lir_cond_greaterEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpge):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpge);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpge):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpge);
      }
      break;
    case lir_cond_greater:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpgt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpgt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpgt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpgt);
      }
      break;
    case lir_cond_aboveEqual:
    case lir_cond_belowEqual:
      ShouldNotReachHere();  // We're not going to get these.
    default:
      assert(lir_cond(cond) == lir_cond_always, "must be");
      ShouldNotReachHere();
  }
  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  // Call float compare function, returns (1,0) if true or false.
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, intType, NULL);
  __ cmp(lir_cond_equal, result,
         compare_to_zero ?
             LIR_OprFact::intConst(0) : LIR_OprFact::intConst(1));
  profile_branch(x, cond);
  move_to_phi(x->state());
  __ branch(lir_cond_equal, T_INT, x->tsux());
}
#endif // __SOFTFP__

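// Generic If: under __SOFTFP__ float/double compares are routed through
// do_soft_float_compare() above. For long compares the operands are swapped (and the
// condition mirrored) so that only eql/neq/lss/geq remain, and the first operand's
// registers may be clobbered by the two-word compare (set_destroys_register).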
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();

#ifdef __SOFTFP__
  if (tag == floatTag || tag == doubleTag) {
    do_soft_float_compare(x);
    assert(x->default_sux() == x->fsux(), "wrong destination above");
    __ jump(x->default_sux());
    return;
  }
#endif // __SOFTFP__

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  if (tag == longTag) {
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }

  xin->load_item();
  LIR_Opr left = xin->result();
  LIR_Opr right;

  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
      (cond == If::eql || cond == If::neq)) {
    // inline long zero
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_nonconstant();
    right = yin->result();
  }

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
                                             x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::Rthread_opr;
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::R0_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::R0_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, getThreadTemp(), LIR_OprFact::illegalOpr, args);
}

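// Volatile 64-bit (double-word) accesses go through the dedicated
// volatile_store_mem_reg/volatile_load_mem_reg ops, which expect a plain [base] address;
// any displacement is therefore folded into a temporary base register first.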
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (value->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_store_mem_reg(value, new LIR_Address(tmp, (intx)0, address->type()), info);
    return;
  }
  __ store(value, address, info, lir_patch_none);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (result->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(address->base(), address->disp(), tmp);
    __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, address->type()), result, info);
    return;
  }
  __ load(address, result, info, lir_patch_none);
}