/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_ppc.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
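
// The `__` shorthand appends LIR instructions to the generator's LIR list;
// in ASSERT builds it also records the C++ source position of each emitted
// instruction for LIR debugging output.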

void LIRItem::load_byte_item() {
  // Byte loads use the same registers as other loads.
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}


//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------

LIR_Opr LIRGenerator::exceptionOopOpr()              { return FrameMap::R3_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()               { return FrameMap::R4_opr; }
LIR_Opr LIRGenerator::syncLockOpr()                  { return FrameMap::R5_opr; }     // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::syncTempOpr()                  { return FrameMap::R4_oop_opr; } // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::getThreadTemp()                { return LIR_OprFact::illegalOpr; } // not needed

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
  case intTag:     opr = FrameMap::R3_opr;         break;
  case objectTag:  opr = FrameMap::R3_oop_opr;     break;
  case longTag:    opr = FrameMap::R3_long_opr;    break;
  case floatTag:   opr = FrameMap::F1_opr;         break;
  case doubleTag:  opr = FrameMap::F1_double_opr;  break;

  case addressTag:
  default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}

LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
  ShouldNotReachHere();
  return LIR_OprFact::illegalOpr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------

// PPC cannot inline all constants.
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_LongConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


// Only simm16 constants can be inlined.
bool LIRGenerator::can_inline_as_constant(Value i) const {
  return can_store_as_constant(i, as_BasicType(i->type()));
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_INT) {
    return Assembler::is_simm16(c->as_jint());
  }
  if (c->type() == T_LONG) {
    return Assembler::is_simm16(c->as_jlong());
  }
  if (c->type() == T_OBJECT) {
    return c->as_jobject() == NULL;
  }
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // Accumulate fixed displacements.
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_LONG) {
      large_disp += constant->as_jlong() << shift;
    } else {
      large_disp += (intx)(constant->as_jint()) << shift;
    }
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // Apply the shift and accumulate the displacement.
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::is_simm16(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (!Assembler::is_simm16(large_disp)) {
    // The index is illegal, so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // At this point we either have base + index or base + displacement.
  if (large_disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Assembler::is_simm16(large_disp), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}
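// Illustrative example (a sketch, not emitted verbatim): for base = R3,
// index = R4 (register), shift = 2 and disp = 0x20, the code above produces
//   tmp = R4 << 2;  tmp = tmp + 0x20;
// and returns the address [R3 + tmp], so any displacement remaining in the
// final address is guaranteed to fit into a simm16 field.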


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Opr base_opr;
  intx offset = arrayOopDesc::base_offset_in_bytes(type);

  if (index_opr->is_constant()) {
    intx i = index_opr->as_constant_ptr()->as_jint();
    intx array_offset = i * elem_size;
    if (Assembler::is_simm16(array_offset + offset)) {
      base_opr = array_opr;
      offset = array_offset + offset;
    } else {
      base_opr = new_pointer_register();
      if (Assembler::is_simm16(array_offset)) {
        __ add(array_opr, LIR_OprFact::intptrConst(array_offset), base_opr);
      } else {
        __ move(LIR_OprFact::intptrConst(array_offset), base_opr);
        __ add(base_opr, array_opr, base_opr);
      }
    }
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif

    base_opr = new_pointer_register();
    assert(index_opr->is_register(), "Must be register");
    if (shift > 0) {
      __ shift_left(index_opr, shift, base_opr);
      __ add(base_opr, array_opr, base_opr);
    } else {
      __ add(index_opr, array_opr, base_opr);
    }
  }
  return new LIR_Address(base_opr, offset, type);
}
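// Illustrative example: for a T_INT array and constant index 5, elem_size
// is 4 and array_offset is 20; together with the array header offset this
// fits into simm16, so no extra instructions are emitted and the result is
// simply [array + header_offset + 20].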


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  if (!Assembler::is_simm16(x)) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}
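// Example: load_immediate(42, T_INT) is returned as an inline constant
// operand, while load_immediate(0x12345, T_INT) exceeds the simm16 range
// and is first materialized into a fresh register.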


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, load_immediate(step, addr->type()), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, T_INT), tmp, info);
  __ cmp(condition, tmp, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
                               int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, type), tmp, info);
  __ cmp(condition, reg, tmp);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
    __ shift_left(left, log2_int(c + 1), result);
    __ sub(result, left, result);
    return true;
  } else if (is_power_of_2(c - 1)) {
    __ shift_left(left, log2_int(c - 1), result);
    __ add(result, left, result);
    return true;
  }
  return false;
}
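// Examples: c == 7 (2^3 - 1) is reduced to (left << 3) - left, and
// c == 9 (2^3 + 1) is reduced to (left << 3) + left.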


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  if ((t == T_LONG || t == T_DOUBLE) &&
      ((in_bytes(offset_from_sp) - STACK_BIAS) % 8 != 0)) {
    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  } else {
    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  }
}


//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_opr; // temp
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}


void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // We use R4+R5 in order to get a temp effect. These regs are used in slow path (MonitorEnterStub).
  LIR_Opr lock    = FrameMap::R5_opr;
  LIR_Opr scratch = FrameMap::R4_opr;
  LIR_Opr hdr     = FrameMap::R6_opr;

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect the object to be unlocked).
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  set_no_result(x);
  LIR_Opr lock     = FrameMap::R5_opr;
  LIR_Opr hdr      = FrameMap::R4_opr; // Used for slow path (MonitorExitStub).
  LIR_Opr obj_temp = FrameMap::R6_opr;
  monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  switch (x->op()) {
  case Bytecodes::_fadd:
  case Bytecodes::_fmul:
  case Bytecodes::_fsub:
  case Bytecodes::_fdiv:
  case Bytecodes::_dadd:
  case Bytecodes::_dmul:
  case Bytecodes::_dsub:
  case Bytecodes::_ddiv: {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    right.load_item();
    rlock_result(x);
    arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
  }
  break;

  case Bytecodes::_frem:
  case Bytecodes::_drem: {
    address entry = NULL;
    switch (x->op()) {
    case Bytecodes::_frem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
    default:
      ShouldNotReachHere();
    }
    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
    set_result(x, result);
  }
  break;

  default: ShouldNotReachHere();
  }
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem;

  LIRItem right(x->y(), this);
  // Missing: test whether the instruction is commutative and whether we should swap operands.
  if (right.value()->type()->as_LongConstant() &&
      (x->op() == Bytecodes::_lsub && right.value()->type()->as_LongConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi of the negated constant; negating min_simm16
    // overflows the simm16 range, so the constant must be loaded into a register.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(null_check_info));
    } else {
      jlong const_divisor = divisor->as_constant_ptr()->as_jlong();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::longConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_lrem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need an additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;

  LIRItem right(x->y(), this);
  // Missing: test whether the instruction is commutative and whether we should swap operands.
  if (right.value()->type()->as_IntConstant() &&
      (x->op() == Bytecodes::_isub && right.value()->type()->as_IntConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi of the negated constant; negating min_simm16
    // overflows the simm16 range, so the constant must be loaded into a register.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(null_check_info));
    } else {
      jint const_divisor = divisor->as_constant_ptr()->as_jint();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::intConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_irem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need an additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::R0_opr);
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  LIR_Opr mcount;
  if (count.result()->is_register()) {
    mcount = FrameMap::R0_opr;
  } else {
    mcount = LIR_OprFact::illegalOpr;
  }
  shift_op(x->op(), reg, value.result(), count.result(), mcount);
}


inline bool can_handle_logic_op_as_uimm(ValueType *type, Bytecodes::Code bc) {
  jlong int_or_long_const;
  if (type->as_IntConstant()) {
    int_or_long_const = type->as_IntConstant()->value();
  } else if (type->as_LongConstant()) {
    int_or_long_const = type->as_LongConstant()->value();
  } else if (type->as_ObjectConstant()) {
    return type->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }

  if (Assembler::is_uimm(int_or_long_const, 16)) return true;
  if ((int_or_long_const & 0xFFFF) == 0 &&
      Assembler::is_uimm((jlong)((julong)int_or_long_const >> 16), 16)) return true;

  // see Assembler::andi
  if (bc == Bytecodes::_iand &&
      (is_power_of_2_long(int_or_long_const+1) ||
       is_power_of_2_long(int_or_long_const) ||
       is_power_of_2_long(-int_or_long_const))) return true;
  if (bc == Bytecodes::_land &&
      (is_power_of_2_long(int_or_long_const+1) ||
       (Assembler::is_uimm(int_or_long_const, 32) && is_power_of_2_long(int_or_long_const)) ||
       (int_or_long_const != min_jlong && is_power_of_2_long(-int_or_long_const)))) return true;

  // special case: xor -1
  if ((bc == Bytecodes::_ixor || bc == Bytecodes::_lxor) &&
      int_or_long_const == -1) return true;
  return false;
}
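// Examples (a sketch of the cases above): 0x0000FFFF fits as a 16-bit
// unsigned immediate (andi./ori/xori); the long constant 0xFFFF0000 has a
// zero low halfword and fits the shifted forms (andis./oris/xoris); masks
// of the form 2^n - 1 are covered by the rotate-and-mask cases checked via
// Assembler::andi; and xor with -1 is special-cased as a bitwise not.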


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  Value rval = right.value();
  LIR_Opr r = rval->operand();
  ValueType *type = rval->type();
  // Logic instructions use unsigned immediate values.
  if (can_handle_logic_op_as_uimm(type, x->op())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(type);
      rval->set_operand(r);
    }
    right.set_result(r);
  } else {
    right.load_item();
  }

  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}


LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr result = new_register(T_INT);
  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  cmp_value.load_item();
  new_value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

  if (type == T_OBJECT || type == T_ARRAY) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else {
    Unimplemented();
  }
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, type);
  return result;
}


LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

  __ xchg(addr, value.result(), result, tmp);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar(); // To be safe. Unsafe semantics are unclear.
  } else {
    __ membar_release();
  }

  __ xadd(addr, value.result(), result, tmp);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsqrt: {
      if (VM_Version::has_fsqrt()) {
        assert(x->number_of_arguments() == 1, "wrong type");
        LIRItem value(x->argument_at(0), this);
        value.load_item();
        LIR_Opr dst = rlock_result(x);
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      } // else fallthru
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsqrt:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
        break;
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src     (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst     (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length  (x->argument_at(4), this);

  // Load all values into callee-saved registers (C calling convention),
  // as this makes the parameter passing to the fast case simpler.
  src.load_item_force     (FrameMap::R14_oop_opr);
  src_pos.load_item_force (FrameMap::R15_opr);
  dst.load_item_force     (FrameMap::R17_oop_opr);
  dst_pos.load_item_force (FrameMap::R18_opr);
  length.load_item_force  (FrameMap::R19_opr);
  LIR_Opr tmp =            FrameMap::R20_opr;

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp,
               expected_type, flags, info);
  set_no_result(x);
}


// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  if (!VM_Version::has_mtfprd()) {
    switch (x->op()) {

      // int -> float: force spill
      case Bytecodes::_l2f: {
        if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
          // fcfid+frsp needs fixup code to avoid rounding incompatibility.
          address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
          LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
          set_result(x, result);
          return;
        } // else fallthru
      }
      case Bytecodes::_l2d: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.load_item();
        LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
        __ convert(x->op(), tmp, reg);
        return;
      }
      case Bytecodes::_i2f:
      case Bytecodes::_i2d: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.load_item();
        // Convert i2l first.
        LIR_Opr tmp1 = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, value.result(), tmp1);
        LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
        __ convert(x->op(), tmp2, reg);
        return;
      }

      // float -> int: result will be stored
      case Bytecodes::_f2l:
      case Bytecodes::_d2l: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.set_destroys_register(); // USE_KILL
        value.load_item();
        set_vreg_flag(reg, must_start_in_memory);
        __ convert(x->op(), value.result(), reg);
        return;
      }
      case Bytecodes::_f2i:
      case Bytecodes::_d2i: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.set_destroys_register(); // USE_KILL
        value.load_item();
        // Convert l2i afterwards.
        LIR_Opr tmp1 = new_register(T_LONG);
        set_vreg_flag(tmp1, must_start_in_memory);
        __ convert(x->op(), value.result(), tmp1);
        __ convert(Bytecodes::_l2i, tmp1, reg);
        return;
      }

      // Within same category: just register conversions.
      case Bytecodes::_i2b:
      case Bytecodes::_i2c:
      case Bytecodes::_i2s:
      case Bytecodes::_i2l:
      case Bytecodes::_l2i:
      case Bytecodes::_f2d:
      case Bytecodes::_d2f:
        break;

      default: ShouldNotReachHere();
    }
  }

  // Register conversion.
  LIRItem value(x->value(), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  switch (x->op()) {
    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
    case Bytecodes::_f2i:
    case Bytecodes::_d2i: value.set_destroys_register(); break; // USE_KILL
    default: break;
  }
  __ convert(x->op(), value.result(), reg);
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path.
  const LIR_Opr reg = result_register_for(x->type());
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewInstanceStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewTypeArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewTypeArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());
  // In case of patching (i.e., object class is not yet loaded),
  // we need to reexecute the instruction and therefore provide
  // the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewObjectArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewObjectArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    // FrameMap::_reserved_argument_area_size includes the dimensions
    // varargs, because it's initialized to hir()->max_stack() when the
    // FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  const LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path.
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R5_opr; // Used by slow path.
  __ move(LIR_OprFact::intConst(x->rank()), rank);

  LIR_Opr varargs = FrameMap::as_pointer_opr(R6); // Used by slow path.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);

  // Note: This instruction can be deoptimized in the slow path.
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  const LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // Must do this before locking the destination register as
    // an oop register, and before the obj is loaded (so x->obj()->item()
    // is valid for creating a debug info location).
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  // Ensure the result register is not the input register because the
  // result is initialized before the patching safepoint.
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  LIR_Opr left = LIR_OprFact::illegalOpr;
  LIR_Opr right = LIR_OprFact::illegalOpr;

  xin->load_item();
  left = xin->result();

  if (yin->result()->is_constant() && yin->result()->type() == T_INT &&
      Assembler::is_simm16(yin->result()->as_constant_ptr()->as_jint())) {
    // Inline int constants which are small enough to be immediate operands.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
             (cond == If::eql || cond == If::neq)) {
    // Inline long zero.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_item();
    right = yin->result();
  }
  set_no_result(x);

  // Add safepoint before generating condition code so it can be recomputed.
  if (x->is_safepoint()) {
    // Increment backedge counter if needed.
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(R16_thread);
}


void LIRGenerator::trace_block_entry(BlockBegin* block) {
  LIR_Opr arg1 = FrameMap::R3_opr; // ARG1
  __ move(LIR_OprFact::intConst(block->block_id()), arg1);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(arg1);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifdef _LP64
  __ store(value, address, info);
#else
  Unimplemented();
//  __ volatile_store_mem_reg(value, address, info);
#endif
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifdef _LP64
  __ load(address, result, info);
#else
  Unimplemented();
//  __ volatile_load_mem_reg(address, result, info);
#endif
}


void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateBytesCRC32C:
    case vmIntrinsics::_updateDirectByteBufferCRC32C: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem end(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();
      end.load_nonconstant();

      // len = end - off
      LIR_Opr len  = end.result();
      LIR_Opr tmpA = new_register(T_INT);
      LIR_Opr tmpB = new_register(T_INT);
      __ move(end.result(), tmpA);
      __ move(off.result(), tmpB);
      __ sub(tmpA, tmpB, tmpA);
      len = tmpA;

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      __ move(len, cc->at(2));   // We skip int->long conversion here, because CRC32C stub expects int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                  ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}