/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_ppc.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
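
// The `__` shorthand appends ops to the generator's LIR list; in ASSERT
// builds it also records the C++ source location of each emitted LIR op,
// which helps trace generated LIR back to this file.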

void LIRItem::load_byte_item() {
  // Byte loads use the same registers as other loads.
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}


//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------

LIR_Opr LIRGenerator::exceptionOopOpr()              { return FrameMap::R3_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()               { return FrameMap::R4_opr; }
LIR_Opr LIRGenerator::syncLockOpr()                  { return FrameMap::R5_opr; }     // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::syncTempOpr()                  { return FrameMap::R4_oop_opr; } // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::getThreadTemp()                { return LIR_OprFact::illegalOpr; } // not needed

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
  case intTag:     opr = FrameMap::R3_opr;         break;
  case objectTag:  opr = FrameMap::R3_oop_opr;     break;
  case longTag:    opr = FrameMap::R3_long_opr;    break;
  case floatTag:   opr = FrameMap::F1_opr;         break;
  case doubleTag:  opr = FrameMap::F1_double_opr;  break;

  case addressTag:
  default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}

LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
  ShouldNotReachHere();
  return LIR_OprFact::illegalOpr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------

// PPC cannot inline all constants.
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_LongConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


// Only simm16 constants (signed 16-bit, i.e. -32768..32767) can be inlined.
bool LIRGenerator::can_inline_as_constant(Value i) const {
  return can_store_as_constant(i, as_BasicType(i->type()));
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_INT) {
    return Assembler::is_simm16(c->as_jint());
  }
  if (c->type() == T_LONG) {
    return Assembler::is_simm16(c->as_jlong());
  }
  if (c->type() == T_OBJECT) {
    return c->as_jobject() == NULL;
  }
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

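  // PPC D-form memory accesses encode only a signed 16-bit (simm16)
  // displacement, so larger displacements must be materialized in a register.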
  // Accumulate fixed displacements.
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_LONG) {
      large_disp += constant->as_jlong() << shift;
    } else {
      large_disp += (intx)(constant->as_jint()) << shift;
    }
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // Apply the shift and accumulate the displacement.
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::is_simm16(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (!Assembler::is_simm16(large_disp)) {
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // At this point we either have base + index or base + displacement.
  if (large_disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Assembler::is_simm16(large_disp), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
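  // For example, a T_INT array has elem_size == 4 and thus shift == 2.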

  LIR_Opr base_opr;
  intx offset = arrayOopDesc::base_offset_in_bytes(type);

  if (index_opr->is_constant()) {
    intx i = index_opr->as_constant_ptr()->as_jint();
    intx array_offset = i * elem_size;
    if (Assembler::is_simm16(array_offset + offset)) {
      base_opr = array_opr;
      offset = array_offset + offset;
    } else {
      base_opr = new_pointer_register();
      if (Assembler::is_simm16(array_offset)) {
        __ add(array_opr, LIR_OprFact::intptrConst(array_offset), base_opr);
      } else {
        __ move(LIR_OprFact::intptrConst(array_offset), base_opr);
        __ add(base_opr, array_opr, base_opr);
      }
    }
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif

    base_opr = new_pointer_register();
    assert(index_opr->is_register(), "Must be register");
    if (shift > 0) {
      __ shift_left(index_opr, shift, base_opr);
      __ add(base_opr, array_opr, base_opr);
    } else {
      __ add(index_opr, array_opr, base_opr);
    }
  }
  return new LIR_Address(base_opr, offset, type);
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  if (!Assembler::is_simm16(x)) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, load_immediate(step, addr->type()), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, T_INT), tmp, info);
  __ cmp(condition, tmp, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
                               int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, type), tmp, info);
  __ cmp(condition, reg, tmp);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
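  // e.g. c == 7: left * 7 == (left << 3) - left; c == 9: left * 9 == (left << 3) + left.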
  if (is_power_of_2(c + 1)) {
    __ shift_left(left, log2_int(c + 1), result);
    __ sub(result, left, result);
    return true;
  } else if (is_power_of_2(c - 1)) {
    __ shift_left(left, log2_int(c - 1), result);
    __ add(result, left, result);
    return true;
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  if ((t == T_LONG || t == T_DOUBLE) &&
      (in_bytes(offset_from_sp) % 8 != 0)) {
    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  } else {
    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  }
}


//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_opr; // temp
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}


void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // We use R4+R5 in order to get a temp effect. These regs are used in slow path (MonitorEnterStub).
  LIR_Opr lock    = FrameMap::R5_opr;
  LIR_Opr scratch = FrameMap::R4_opr;
  LIR_Opr hdr     = FrameMap::R6_opr;

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect the object to be unlocked).
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  set_no_result(x);
  LIR_Opr lock     = FrameMap::R5_opr;
  LIR_Opr hdr      = FrameMap::R4_opr; // Used for slow path (MonitorExitStub).
  LIR_Opr obj_temp = FrameMap::R6_opr;
  monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  switch (x->op()) {
  case Bytecodes::_fadd:
  case Bytecodes::_fmul:
  case Bytecodes::_fsub:
  case Bytecodes::_fdiv:
  case Bytecodes::_dadd:
  case Bytecodes::_dmul:
  case Bytecodes::_dsub:
  case Bytecodes::_ddiv: {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    right.load_item();
    rlock_result(x);
    arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
  }
  break;

  case Bytecodes::_frem:
  case Bytecodes::_drem: {
    address entry = NULL;
    switch (x->op()) {
    case Bytecodes::_frem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
    default:
      ShouldNotReachHere();
    }
    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
    set_result(x, result);
  }
  break;

  default: ShouldNotReachHere();
  }
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem;

  LIRItem right(x->y(), this);
  // Missing test if instr is commutative and if we should swap.
  if (right.value()->type()->as_LongConstant() &&
      (x->op() == Bytecodes::_lsub && right.value()->type()->as_LongConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi and can't support min_simm16 as constant
    // (negating min_simm16 yields 32768, which overflows the 16-bit immediate).
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(null_check_info));
    } else {
      jlong const_divisor = divisor->as_constant_ptr()->as_jlong();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::longConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_lrem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;

  LIRItem right(x->y(), this);
  // Missing test if instr is commutative and if we should swap.
  if (right.value()->type()->as_IntConstant() &&
      (x->op() == Bytecodes::_isub && right.value()->type()->as_IntConstant()->value() == ((-1)<<15)) ) {
    // Sub is implemented by addi and can't support min_simm16 as constant
    // (negating min_simm16 yields 32768, which overflows the 16-bit immediate).
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(null_check_info));
    } else {
      jint const_divisor = divisor->as_constant_ptr()->as_jint();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::intConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_irem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::R0_opr);
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
    default: ShouldNotReachHere();
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  LIR_Opr mcount;
  if (count.result()->is_register()) {
    mcount = FrameMap::R0_opr;
  } else {
    mcount = LIR_OprFact::illegalOpr;
  }
  shift_op(x->op(), reg, value.result(), count.result(), mcount);
}


inline bool can_handle_logic_op_as_uimm(ValueType *type, Bytecodes::Code bc) {
  jlong int_or_long_const;
  if (type->as_IntConstant()) {
    int_or_long_const = type->as_IntConstant()->value();
  } else if (type->as_LongConstant()) {
    int_or_long_const = type->as_LongConstant()->value();
  } else if (type->as_ObjectConstant()) {
    return type->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }

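  // A plain uimm16 fits the unsigned-immediate forms (e.g. andi./ori/xori);
  // a constant whose low 16 bits are zero fits the shifted forms
  // (e.g. andis./oris/xoris) checked next.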
  if (Assembler::is_uimm(int_or_long_const, 16)) return true;
  if ((int_or_long_const & 0xFFFF) == 0 &&
      Assembler::is_uimm((jlong)((julong)int_or_long_const >> 16), 16)) return true;

  // see Assembler::andi
  if (bc == Bytecodes::_iand &&
      (is_power_of_2(int_or_long_const+1) ||
       is_power_of_2(int_or_long_const) ||
       is_power_of_2(-int_or_long_const))) return true;
  if (bc == Bytecodes::_land &&
      (is_power_of_2(int_or_long_const+1) ||
       (Assembler::is_uimm(int_or_long_const, 32) && is_power_of_2(int_or_long_const)) ||
       (int_or_long_const != min_jlong && is_power_of_2(-int_or_long_const)))) return true;

  // special case: xor -1
  if ((bc == Bytecodes::_ixor || bc == Bytecodes::_lxor) &&
      int_or_long_const == -1) return true;
  return false;
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  Value rval = right.value();
  LIR_Opr r = rval->operand();
  ValueType *type = rval->type();
  // Logic instructions use unsigned immediate values.
  if (can_handle_logic_op_as_uimm(type, x->op())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(type);
      rval->set_operand(r);
    }
    right.set_result(r);
  } else {
    right.load_item();
  }

  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}


LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr result = new_register(T_INT);
  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  cmp_value.load_item();
  new_value.load_item();

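  // On CPUs that are not multiple-copy-atomic (such as PPC64), IRIW scenarios
  // require a full barrier here; otherwise a release barrier suffices
  // (see support_IRIW_for_not_multiple_copy_atomic_cpu).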
  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else {
    Unimplemented();
  }
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, type);
  return result;
}


LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

  __ xchg(addr, value.result(), result, tmp);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar(); // To be safe. Unsafe semantics are unclear.
  } else {
    __ membar_release();
  }

  __ xadd(addr, value.result(), result, tmp);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsqrt: {
      if (VM_Version::has_fsqrt()) {
        assert(x->number_of_arguments() == 1, "wrong type");
        LIRItem value(x->argument_at(0), this);
        value.load_item();
        LIR_Opr dst = rlock_result(x);
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      } // else fallthru
    }
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dcos:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
        case vmIntrinsics::_dsqrt:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
          break;
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        case vmIntrinsics::_dexp:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
          break;
        default:
          ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    default:
      break;
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src     (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst     (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length  (x->argument_at(4), this);

  // Load all values in callee_save_registers (C calling convention),
  // as this makes the parameter passing to the fast case simpler.
  src.load_item_force     (FrameMap::R14_oop_opr);
  src_pos.load_item_force (FrameMap::R15_opr);
  dst.load_item_force     (FrameMap::R17_oop_opr);
  dst_pos.load_item_force (FrameMap::R18_opr);
  length.load_item_force  (FrameMap::R19_opr);
  LIR_Opr tmp =            FrameMap::R20_opr;

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp,
               expected_type, flags, info);
  set_no_result(x);
}


// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  if (!VM_Version::has_mtfprd()) {
    switch (x->op()) {

      // int/long -> float/double: force spill, since without mtfprd the
      // value has to travel from GPR to FPR through memory.
      case Bytecodes::_l2f: {
        if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
          // fcfid+frsp needs fixup code to avoid rounding incompatibility.
          address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
          LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
          set_result(x, result);
          return;
        } // else fallthru
      }
      case Bytecodes::_l2d: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.load_item();
        LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
        __ convert(x->op(), tmp, reg);
        return;
      }
      case Bytecodes::_i2f:
      case Bytecodes::_i2d: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.load_item();
        // Convert i2l first.
        LIR_Opr tmp1 = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, value.result(), tmp1);
        LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
        __ convert(x->op(), tmp2, reg);
        return;
      }

      // float -> int: result will be stored
      case Bytecodes::_f2l:
      case Bytecodes::_d2l: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.set_destroys_register(); // USE_KILL
        value.load_item();
        set_vreg_flag(reg, must_start_in_memory);
        __ convert(x->op(), value.result(), reg);
        return;
      }
      case Bytecodes::_f2i:
      case Bytecodes::_d2i: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.set_destroys_register(); // USE_KILL
        value.load_item();
        // Convert l2i afterwards.
        LIR_Opr tmp1 = new_register(T_LONG);
        set_vreg_flag(tmp1, must_start_in_memory);
        __ convert(x->op(), value.result(), tmp1);
        __ convert(Bytecodes::_l2i, tmp1, reg);
        return;
      }

      // Within same category: just register conversions.
      case Bytecodes::_i2b:
      case Bytecodes::_i2c:
      case Bytecodes::_i2s:
      case Bytecodes::_i2l:
      case Bytecodes::_l2i:
      case Bytecodes::_f2d:
      case Bytecodes::_d2f:
        break;

      default: ShouldNotReachHere();
    }
  }

  // Register conversion.
  LIRItem value(x->value(), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  switch (x->op()) {
    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
    case Bytecodes::_f2i:
    case Bytecodes::_d2i: value.set_destroys_register(); break; // USE_KILL
    default: break;
  }
  __ convert(x->op(), value.result(), reg);
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path.
  const LIR_Opr reg = result_register_for(x->type());
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewInstanceStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewTypeArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewTypeArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());
  // In case of patching (i.e., object class is not yet loaded),
  // we need to reexecute the instruction and therefore provide
  // the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewObjectArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewObjectArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    // FrameMap::_reserved_argument_area_size includes the dimensions
    // varargs, because it's initialized to hir()->max_stack() when the
    // FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  const LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path.
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R5_opr; // Used by slow path.
  __ move(LIR_OprFact::intConst(x->rank()), rank);

  LIR_Opr varargs = FrameMap::as_pointer_opr(R6); // Used by slow path.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);

  // Note: This instruction can be deoptimized in the slow path.
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  const LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // Must do this before locking the destination register as
    // an oop register, and before the obj is loaded (so x->obj()->item()
    // is valid for creating a debug info location).
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  // Ensure the result register is not the input register because the
  // result is initialized before the patching safepoint.
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  LIR_Opr left = LIR_OprFact::illegalOpr;
  LIR_Opr right = LIR_OprFact::illegalOpr;

  xin->load_item();
  left = xin->result();

  if (yin->result()->is_constant() && yin->result()->type() == T_INT &&
      Assembler::is_simm16(yin->result()->as_constant_ptr()->as_jint())) {
    // Inline int constants which are small enough to be immediate operands.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
             (cond == If::eql || cond == If::neq)) {
    // Inline long zero.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_item();
    right = yin->result();
  }
  set_no_result(x);

  // Add safepoint before generating condition code so it can be recomputed.
  if (x->is_safepoint()) {
    // Increment backedge counter if needed.
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(R16_thread);
}


void LIRGenerator::trace_block_entry(BlockBegin* block) {
  LIR_Opr arg1 = FrameMap::R3_opr; // ARG1
  __ move(LIR_OprFact::intConst(block->block_id()), arg1);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(arg1);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifdef _LP64
  __ store(value, address, info);
#else
  Unimplemented();
//  __ volatile_store_mem_reg(value, address, info);
#endif
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifdef _LP64
  __ load(address, result, info);
#else
  Unimplemented();
//  __ volatile_load_mem_reg(address, result, info);
#endif
}


void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
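      // updateBytes operates on a Java byte[], so start past the array header;
      // for a direct ByteBuffer the buf argument is already the raw data address.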
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateBytesCRC32C:
    case vmIntrinsics::_updateDirectByteBufferCRC32C: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem end(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();
      end.load_nonconstant();

      // len = end - off
      LIR_Opr len  = end.result();
      LIR_Opr tmpA = new_register(T_INT);
      LIR_Opr tmpB = new_register(T_INT);
      __ move(end.result(), tmpA);
      __ move(off.result(), tmpB);
      __ sub(tmpA, tmpB, tmpA);
      len = tmpA;

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      __ move(len, arg3);        // We skip int->long conversion here, because CRC32C stub expects int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instruction support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                  ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}