/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"

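// The "__" shorthand appends LIR instructions to the generator's LIR list;
// in ASSERT builds each instruction also records the C++ source location
// that emitted it.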
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;          break;
    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;       break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------

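// Only zero/null constants are treated as directly storable here; anything
// else would first have to be materialized into a register.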
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}

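// Build a LIR_Address for base + (index << shift) + disp.  Constant indices
// are folded into the displacement; displacements that do not fit the
// addressing-mode immediates are materialized into registers.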
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // accumulate fixed displacements
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_INT) {
      large_disp += index->as_jint() << shift;
    } else {
      assert(constant->type() == T_LONG, "should be");
      jlong c = index->as_jlong() << shift;
      if ((jlong)((jint)c) == c) {
        large_disp += c;
        index = LIR_OprFact::illegalOpr;
      } else {
        LIR_Opr tmp = new_register(T_LONG);
        __ move(index, tmp);
        index = tmp;
        // apply shift and displacement below
      }
    }
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (large_disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}

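// Form the address of an array element.  For a variable index the array
// header offset is added to the base up front, since the register-index
// addressing mode cannot also carry a displacement.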
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (offset_in_bytes) {
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}

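// Materialize an integer constant, moving it into a fresh register when it
// cannot be encoded as an immediate operand.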
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
    r = NULL;  // unreachable
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch (addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}

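// Try to replace a multiply by constant c with a shift plus an add/sub when
// c == 2^n + 1 or c == 2^n - 1, e.g. x * 9 => (x << 3) + x and
// x * 7 => (x << 3) - x.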
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}

void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate(from.result(), result);
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp());

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    left.load_item();
    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem(left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div(left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
    assert(x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
           "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (!right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || !right.is_constant()
          || !Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert(x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register (not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    rlock_result(x);
    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert(x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

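// Compare-and-swap.  The platform CAS leaves its status in r8; XORing that
// with 1 converts it into the 0/1 success flag the caller expects.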
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (is_reference_type(type)) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
    Unimplemented();
  }
  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
  return result;
}

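// Atomic exchange: returns the previous value stored at addr.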
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = is_reference_type(type);
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

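// Atomic fetch-and-add: adds value to the contents of addr and returns the
// previous contents.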
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = new_register(T_INT);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }
  switch (x->id()) {
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt: {
    assert(x->number_of_arguments() == 1, "wrong type");
    LIRItem value(x->argument_at(0), this);
    value.load_item();
    LIR_Opr dst = rlock_result(x);

    switch (x->id()) {
    case vmIntrinsics::_dsqrt: {
      __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dabs: {
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    default:
      ShouldNotReachHere();
    }
    break;
  }
  default:
    ShouldNotReachHere();
  }
}

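// Each libm intrinsic calls the platform-specific stub when one has been
// generated, and otherwise falls back to the shared C runtime implementation.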
void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

  switch (x->id()) {
  case vmIntrinsics::_dexp:
    if (StubRoutines::dexp() != NULL) {
      __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog:
    if (StubRoutines::dlog() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog10:
    if (StubRoutines::dlog10() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dpow:
    if (StubRoutines::dpow() != NULL) {
      __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dsin:
    if (StubRoutines::dsin() != NULL) {
      __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dcos:
    if (StubRoutines::dcos() != NULL) {
      __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dtan:
    if (StubRoutines::dtan() != NULL) {
      __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
    }
    break;
  default: ShouldNotReachHere();
  }
  __ move(result_reg, calc_result);
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
  case vmIntrinsics::_updateCRC32: {
    LIRItem crc(x->argument_at(0), this);
    LIRItem val(x->argument_at(1), this);
    // val is destroyed by update_crc32
    val.set_destroys_register();
    crc.load_item();
    val.load_item();
    __ update_crc32(crc.result(), val.result(), result);
    break;
  }
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32: {
    bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

    LIRItem crc(x->argument_at(0), this);
    LIRItem buf(x->argument_at(1), this);
    LIRItem off(x->argument_at(2), this);
    LIRItem len(x->argument_at(3), this);
    buf.load_item();
    off.load_nonconstant();

    LIR_Opr index = off.result();
    int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
    if (off.result()->is_constant()) {
      index = LIR_OprFact::illegalOpr;
      offset += off.result()->as_jint();
    }
    LIR_Opr base_op = buf.result();

    if (index->is_valid()) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index, tmp);
      index = tmp;
    }

    if (is_updateBytes) {
      base_op = access_resolve(ACCESS_READ, base_op);
    }

    if (offset) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, LIR_OprFact::intConst(offset), tmp);
      base_op = tmp;
      offset = 0;
    }

    LIR_Address* a = new LIR_Address(base_op,
                                     index,
                                     offset,
                                     T_BYTE);
    BasicTypeList signature(3);
    signature.append(T_INT);
    signature.append(T_ADDRESS);
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    const LIR_Opr result_reg = result_register_for(x->type());

    LIR_Opr addr = new_pointer_register();
    __ leal(LIR_OprFact::address(a), addr);

    crc.load_item_force(cc->at(0));
    __ move(addr, cc->at(1));
    len.load_item_force(cc->at(2));

    __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    break;
  }
  default: {
    ShouldNotReachHere();
  }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C: {
    bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
    int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;

    LIRItem crc(x->argument_at(0), this);
    LIRItem buf(x->argument_at(1), this);
    LIRItem off(x->argument_at(2), this);
    LIRItem end(x->argument_at(3), this);

    buf.load_item();
    off.load_nonconstant();
    end.load_nonconstant();

    // len = end - off
    LIR_Opr len = end.result();
    LIR_Opr tmpA = new_register(T_INT);
    LIR_Opr tmpB = new_register(T_INT);
    __ move(end.result(), tmpA);
    __ move(off.result(), tmpB);
    __ sub(tmpA, tmpB, tmpA);
    len = tmpA;

    LIR_Opr index = off.result();
    if (off.result()->is_constant()) {
      index = LIR_OprFact::illegalOpr;
      offset += off.result()->as_jint();
    }
    LIR_Opr base_op = buf.result();

    if (index->is_valid()) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index, tmp);
      index = tmp;
    }

    if (is_updateBytes) {
      base_op = access_resolve(ACCESS_READ, base_op);
    }

    if (offset) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, LIR_OprFact::intConst(offset), tmp);
      base_op = tmp;
      offset = 0;
    }

    LIR_Address* a = new LIR_Address(base_op,
                                     index,
                                     offset,
                                     T_BYTE);
    BasicTypeList signature(3);
    signature.append(T_INT);
    signature.append(T_ADDRESS);
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    const LIR_Opr result_reg = result_register_for(x->type());

    LIR_Opr addr = new_pointer_register();
    __ leal(LIR_OprFact::address(a), addr);

    crc.load_item_force(cc->at(0));
    __ move(addr, cc->at(1));
    __ move(len, cc->at(2));

    __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    break;
  }
  default: {
    ShouldNotReachHere();
  }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                  ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

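// The dimension sizes are stored to the stack and handed to the runtime as a
// varargs block (r2), together with the klass (r0) and the rank (r19).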
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
                                             x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (!is_c1_or_interpreter_only()) {
    __ membar();
  }
  __ volatile_load_mem_reg(address, result, info);
}