/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_aarch64.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


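// Load the item into a register, except when its operand is already a
// constant, which can be left in place and used directly.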
void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


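// Fixed result registers for calls: integral and oop results come back
// in r0, floating-point results in v0, following the AArch64 calling
// convention.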
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;           break;
    case objectTag:  opr = FrameMap::r0_oop_opr;       break;
    case longTag:    opr = FrameMap::long0_opr;        break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;   break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


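// Only zero and null constants may be stored directly; these can
// presumably be written using the zero register as the source.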
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


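// Build an addressing mode for base + index<<shift + disp.  Constant
// indexes are folded into the displacement; anything that cannot be
// encoded directly (a shifted index, an out-of-range displacement) is
// materialized into a temporary register first, so the returned address
// is always of the simple form base + index or base + displacement.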
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // accumulate fixed displacements
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_INT) {
      large_disp += index->as_jint() << shift;
    } else {
      assert(constant->type() == T_LONG, "should be");
      jlong c = index->as_jlong() << shift;
      if ((jlong)((jint)c) == c) {
        large_disp += c;
        index = LIR_OprFact::illegalOpr;
      } else {
        LIR_Opr tmp = new_register(T_LONG);
        __ move(index, tmp);
        index = tmp;
        // apply shift and displacement below
      }
    }
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (large_disp == 0 && index->is_register()) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}

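// Form the address of an array element: array base + header size +
// index * element size.  A constant index folds entirely into the
// displacement; otherwise the header offset is added into a temporary
// base register so that a scaled register index can be used.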
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (offset_in_bytes) {
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}

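// Materialize an int or long immediate, falling back to a fresh
// register when the value is not encodable as a logical immediate
// (a conservative test; see the comment below).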
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
    r = NULL;  // unreachable
  }
  return r;
}



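// Increment a counter at a fixed absolute address: materialize the
// address in a pointer register, then delegate to the address-based
// overload below.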
void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch(addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}


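// Strength-reduce a multiply by constants of the form 2^n + 1 or
// 2^n - 1 into a shift plus an add or subtract, e.g.
//   x * 9 == (x << 3) + x   and   x * 7 == (x << 3) - x.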
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {

  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}

void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);
    __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                        x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {

  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate (from.result(), result);

}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp());

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    left.load_item();
    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2_long(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }


  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register (not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    rlock_result(x);
    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

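    // Per the JVM spec, shift distances use only the low 5 bits for
    // int shifts and the low 6 bits for long shifts, hence the
    // 0x1f/0x3f masks here and in the register case below.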
    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant()))))  {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (type == T_OBJECT || type == T_ARRAY) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
    Unimplemented();
  }
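  // The emitted CAS sequence leaves its status in r8 (rscratch1);
  // STXR writes 0 on success, so XOR with 1 converts the status into
  // the Java boolean result.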
  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
        case vmIntrinsics::_dsqrt: {
          __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_dabs: {
          __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
      }
      break;
    }
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

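  // For each intrinsic, prefer the optimized stub when one has been
  // generated; otherwise fall back to the portable SharedRuntime
  // implementation.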
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != NULL) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != NULL) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != NULL) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != NULL) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != NULL) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default:  ShouldNotReachHere();
  }
  __ move(result_reg, calc_result);
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

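      // Widen a register index to 64 bits and fold any remaining byte
      // offset into the base, leaving a simple base+index address for
      // the runtime call below.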
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateBytesCRC32C:
    case vmIntrinsics::_updateDirectByteBufferCRC32C: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem end(x->argument_at(3), this);

      buf.load_item();
      off.load_nonconstant();
      end.load_nonconstant();

      // len = end - off
      LIR_Opr len  = end.result();
      LIR_Opr tmpA = new_register(T_INT);
      LIR_Opr tmpB = new_register(T_INT);
      __ move(end.result(), tmpA);
      __ move(off.result(), tmpB);
      __ sub(tmpA, tmpB, tmpA);
      len = tmpA;

      LIR_Opr index = off.result();
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      __ move(len, cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD:   __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF:   __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                    ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
                       FrameMap::r2_oop_opr,
                       FrameMap::r5_oop_opr,
                       FrameMap::r4_oop_opr,
                       LIR_OprFact::illegalOpr,
                       FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


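// Allocate a multi-dimensional array in the runtime.  The dimension
// sizes are stored out to the stack, a pointer to them (the varargs)
// is passed in r2 and the rank in r19, as expected by
// Runtime1::new_multi_array_id.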
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant()))  {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (! UseBarriersForVolatile) {
    __ membar();
  }

  __ volatile_load_mem_reg(address, result, info);
}