/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
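
// Note: the '__' shorthand above routes every emission through the current
// generator's LIR list; in debug builds it also records the C++
// __FILE__/__LINE__ of the emitting code, so LIR dumps can be traced back
// to the generator code that produced each instruction.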

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);

    _result = reg;
  }
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }
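
// The accessors above pin operands into the registers that x86 instructions
// implicitly use: idiv/irem consume the dividend in rax and produce the
// quotient in rax and the remainder in rdx, and a variable shift count must
// live in rcx (cl).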


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::rax_opr;         break;
    case objectTag:  opr = FrameMap::rax_oop_opr;     break;
    case longTag:    opr = FrameMap::long0_opr;       break;
#ifdef _LP64
    case floatTag:   opr = FrameMap::xmm0_float_opr;  break;
    case doubleTag:  opr = FrameMap::xmm0_double_opr; break;
#else
    case floatTag:   opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
#endif // _LP64
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    // there is no immediate move of word values in assembler_i486.?pp
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == NULL) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


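// Note on the two overloads below: long constants are never inlined (most
// x86-64 instructions take at most a 32-bit immediate, and on 32-bit a long
// occupies two words), and object constants are inlined only when null.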
bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->tag() == longTag) return false;
  return v->type()->tag() != objectTag ||
         (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_LONG) return false;
  return c->type() != T_OBJECT || c->as_jobject() == NULL;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  NOT_LP64( return new_register(T_ADDRESS); )
  return LIR_OprFact::illegalOpr;
}


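// A constant index is folded into the address displacement (index << shift
// + disp). On 64-bit the folded value must still fit into the 32-bit
// displacement field of the addressing mode; otherwise the index is first
// materialized in a temporary register.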
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const* constant = index->as_constant_ptr();
#ifdef _LP64
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
#else
    return new LIR_Address(base,
                           ((intx)(constant->as_jint()) << shift) + disp,
                           type);
#endif
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  return r;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}


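// Rewrite a multiply by (2^n + 1) or (2^n - 1) as a shift plus add/sub.
// For example, for c == 7 (c + 1 == 8 is a power of two) this emits
// tmp = left; left <<= 3; result = left - tmp, i.e. 8*x - x == 7*x.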
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid() && c > 0 && c < max_jint) {
    if (is_power_of_2(c + 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2_jint(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2_jint(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}


void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.set_destroys_register();
  value.load_item();
  LIR_Opr reg = rlock(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
#ifdef _LP64
  if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
    if (x->type()->tag() == doubleTag) {
      tmp = new_register(T_DOUBLE);
      __ move(LIR_OprFact::doubleConst(-0.0), tmp);
    } else if (x->type()->tag() == floatTag) {
      tmp = new_register(T_FLOAT);
      __ move(LIR_OprFact::floatConst(-0.0), tmp);
    }
  }
#endif
  __ negate(value.result(), reg, tmp);

  set_result(x, round_item(reg));
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
  bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
  if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
    left.load_item();
  } else {
    left.dont_load_item();
  }

#ifndef _LP64
  // do not load right operand if it is a constant.  only 0 and 1 are
  // loaded because there are special instructions for loading them
  // without memory access (not needed for SSE2 instructions)
  bool must_load_right = false;
  if (right.is_constant()) {
    LIR_Const* c = right.result()->as_constant_ptr();
    assert(c != NULL, "invalid constant");
    assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");

    if (c->type() == T_FLOAT) {
      must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float());
    } else {
      must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double());
    }
  }
#endif // !LP64

  if (must_load_both) {
    // frem and drem also destroy the right operand, so move it to a new register
    right.set_destroys_register();
    right.load_item();
  } else if (right.is_register()) {
    right.load_item();
#ifndef _LP64
  } else if (must_load_right) {
    right.load_item();
#endif // !LP64
  } else {
    right.dont_load_item();
  }
  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

#ifdef _LP64
  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // frem and drem are implemented as a direct call into the runtime.
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    BasicType bt = as_BasicType(x->type());
    BasicTypeList signature(2);
    signature.append(bt);
    signature.append(bt);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(0));
    right.load_item_force(cc->at(1));

    address entry = NULL;
    switch (x->op()) {
      case Bytecodes::_frem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
        break;
      case Bytecodes::_drem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
        break;
      default:
        ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp);
    set_result(x, round_item(reg));
  }
#else
  if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) {
    // special handling for frem and drem: no SSE instruction, so must use FPU with temporary fpu stack slots
    LIR_Opr fpu0, fpu1;
    if (x->op() == Bytecodes::_frem) {
      fpu0 = LIR_OprFact::single_fpu(0);
      fpu1 = LIR_OprFact::single_fpu(1);
    } else {
      fpu0 = LIR_OprFact::double_fpu(0);
      fpu1 = LIR_OprFact::double_fpu(1);
    }
    __ move(right.result(), fpu1); // order of left and right operand is important!
    __ move(left.result(), fpu0);
    __ rem (fpu0, fpu1, fpu0);
    __ move(fpu0, reg);

  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp);
  }
  set_result(x, round_item(reg));
#endif // _LP64
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    // long division is implemented as a direct call into the runtime
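    // (the C entry points take the divisor as their first argument and the
    // dividend as their second, which is why the dividend is forced into
    // cc->at(1) and the divisor is moved into cc->at(0) below)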
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, new DivByZeroStub(info));

    address entry = NULL;
    switch (x->op()) {
      case Bytecodes::_lrem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
        break; // the divisor-is-zero check is done above
      case Bytecodes::_ldiv:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
        break; // the divisor-is-zero check is done above
      default:
        ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();

    left.load_item();
    right.load_item();

    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo:
    //                                               (overflow case)
    // input : rax: dividend                         min_jint
    //         reg: divisor (may not be rax or rdx)  -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_jint
    //         rdx: remainder (= rax irem reg)       0

    // rax and rdx will be destroyed

    // Note: does this invalidate the spec ???
    LIRItem right(x->y(), this);
    LIRItem left(x->x(), this);   // visit left second, so that the is_register test is valid

    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info.  Otherwise the live range of the fixed
    // register might be too long.
    CodeEmitInfo* info = state_for(x);

    left.load_item_force(divInOpr());

    right.load_item();

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();
    } else {
      result_reg = remOutOpr();
    }

    if (!ImplicitDiv0Checks) {
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = NULL;
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register (not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul) {
      // check if we can use shift instead
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        jint iconst = right_arg->get_jint_constant();
        if (iconst > 0 && iconst < max_jint) {
          if (is_power_of_2(iconst)) {
            use_constant = true;
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            use_constant = true;
            use_tmp = true;
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  // a variable shift count must be in rcx; a constant count for an int
  // shift can be encoded directly as an immediate
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  bool must_load_count = !count.is_constant() || elemType == longTag;
  if (must_load_count) {
    // count for long must be in register
    count.load_item_force(shiftCountOpr());
  } else {
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  right.load_nonconstant();
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (is_reference_type(type)) {
    cmp_value.load_item_force(FrameMap::rax_oop_opr);
    new_value.load_item();
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_INT) {
    cmp_value.load_item_force(FrameMap::rax_opr);
    new_value.load_item();
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    cmp_value.load_item_force(FrameMap::long0_opr);
    new_value.load_item_force(FrameMap::long1_opr);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    Unimplemented();
  }
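  // cmpxchg compares against rax and sets ZF on success, so the boolean
  // result is materialized from the flags with a conditional move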
  LIR_Opr result = new_register(T_INT);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, T_INT);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = is_reference_type(type);
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value2.set_destroys_register();

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
    case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
    default:                  ShouldNotReachHere();
  }
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");

  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }

  LIRItem value(x->argument_at(0), this);

  bool use_fpu = false;
#ifndef _LP64
  if (UseSSE < 2) {
    value.set_destroys_register();
  }
#endif // !LP64
  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
#ifdef _LP64
  if (UseAVX > 2 && (!VM_Version::supports_avx512vl()) &&
      (x->id() == vmIntrinsics::_dabs)) {
    tmp = new_register(T_DOUBLE);
    __ move(LIR_OprFact::doubleConst(-0.0), tmp);
  }
#endif

  switch (x->id()) {
    case vmIntrinsics::_dabs:  __ abs(calc_input, calc_result, tmp);                       break;
    case vmIntrinsics::_dsqrt: __ sqrt(calc_input, calc_result, LIR_OprFact::illegalOpr);  break;
    default:                   ShouldNotReachHere();
  }

  if (use_fpu) {
    __ move(calc_result, x->operand());
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }
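
  // Each intrinsic below prefers the JIT-generated stub routine when one was
  // built at startup and otherwise falls back to the shared C runtime entry;
  // on 32-bit the result is additionally returned on the FPU stack (fpu0)
  // rather than in xmm0.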

#ifndef _LP64
  LIR_Opr tmp = FrameMap::fpu0_double_opr;
  result_reg = tmp;
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != NULL) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != NULL) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (VM_Version::supports_sse2() && StubRoutines::dsin() != NULL) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (VM_Version::supports_sse2() && StubRoutines::dcos() != NULL) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != NULL) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default: ShouldNotReachHere();
  }
#else
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != NULL) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != NULL) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != NULL) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != NULL) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != NULL) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default: ShouldNotReachHere();
  }
#endif // _LP64
  __ move(result_reg, calc_result);
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

#ifndef _LP64
  src.load_item_force     (FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst.load_item_force     (FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force  (FrameMap::rdi_opr);
  LIR_Opr tmp =           (FrameMap::rsi_opr);
#else

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer
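  // (on x86-64 the j_rargs are, in effect, the c_rargs rotated by one
  // position, so each argument below either already sits in its C slot or
  // can be moved there without clobbering a later one)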

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
#endif // _LP64

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

#ifndef _LP64
      if (!is_updateBytes) { // buf is a long holding a raw address
        base_op = new_register(T_INT);
        __ convert(Bytecodes::_l2i, buf.result(), base_op);
      }
#else
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }
#endif

      if (is_updateBytes) {
        base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op);
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  assert(UseVectorizedMismatchIntrinsic, "need AVX instruction support");

  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);

  LIRItem a(x->argument_at(0), this);                   // Object
  LIRItem aOffset(x->argument_at(1), this);             // long
  LIRItem b(x->argument_at(2), this);                   // Object
  LIRItem bOffset(x->argument_at(3), this);             // long
  LIRItem length(x->argument_at(4), this);              // int
  LIRItem log2ArrayIndexScale(x->argument_at(5), this); // int

  a.load_item();
  aOffset.load_nonconstant();
  b.load_item();
  bOffset.load_nonconstant();

  long constant_aOffset = 0;
  LIR_Opr result_aOffset = aOffset.result();
  if (result_aOffset->is_constant()) {
    constant_aOffset = result_aOffset->as_jlong();
    result_aOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_a = access_resolve(ACCESS_READ, a.result());

  long constant_bOffset = 0;
  LIR_Opr result_bOffset = bOffset.result();
  if (result_bOffset->is_constant()) {
    constant_bOffset = result_bOffset->as_jlong();
    result_bOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_b = access_resolve(ACCESS_READ, b.result());

#ifndef _LP64
  result_a = new_register(T_INT);
  __ convert(Bytecodes::_l2i, a.result(), result_a);
  result_b = new_register(T_INT);
  __ convert(Bytecodes::_l2i, b.result(), result_b);
#endif


  LIR_Address* addr_a = new LIR_Address(result_a,
                                        result_aOffset,
                                        constant_aOffset,
                                        T_BYTE);

  LIR_Address* addr_b = new LIR_Address(result_b,
                                        result_bOffset,
                                        constant_bOffset,
                                        T_BYTE);

  BasicTypeList signature(4);
  signature.append(T_ADDRESS);
  signature.append(T_ADDRESS);
  signature.append(T_INT);
  signature.append(T_INT);
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  const LIR_Opr result_reg = result_register_for(x->type());

  LIR_Opr ptr_addr_a = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_a), ptr_addr_a);

  LIR_Opr ptr_addr_b = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_b), ptr_addr_b);

  __ move(ptr_addr_a, cc->at(0));
  __ move(ptr_addr_b, cc->at(1));
  length.load_item_force(cc->at(2));
  log2ArrayIndexScale.load_item_force(cc->at(3));

  __ call_runtime_leaf(StubRoutines::vectorizedMismatch(), getThreadTemp(), result_reg, cc->args());
  __ move(result_reg, result);
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
LIR_Opr fixed_register_for(BasicType type) {
  switch (type) {
    case T_FLOAT:  return FrameMap::fpu0_float_opr;
    case T_DOUBLE: return FrameMap::fpu0_double_opr;
    case T_INT:    return FrameMap::rax_opr;
    case T_LONG:   return FrameMap::long0_opr;
    default:       ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
}

void LIRGenerator::do_Convert(Convert* x) {
#ifdef _LP64
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);
  __ convert(x->op(), input, result);
  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
#else
  // flags that vary for the different operations and different SSE-settings
  bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;
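
  // needs_stub marks f2i/d2i: the x86 truncating convert instructions yield
  // the "integer indefinite" value (min_jint) for NaN and out-of-range
  // inputs, and the ConversionStub fixes those cases up to the Java semantics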

  switch (x->op()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;

    case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2f: fixed_input = false;       fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2f: fixed_input = false;       fixed_result = false;       round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2d: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_f2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_d2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_l2f: fixed_input = false;       fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_l2d: fixed_input = false;       fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
    case Bytecodes::_f2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  if (fixed_input) {
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }

  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }

  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }

  __ convert(x->op(), conv_input, conv_result, stub);

  if (result != conv_result) {
    __ move(conv_result, result);
  }

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
#endif // _LP64
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::rax_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // longs cannot handle constants on the right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
                                             x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
  return FrameMap::as_pointer_opr(r15_thread);
#else
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
#endif // _LP64
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
  LIR_OprList* args = new LIR_OprList();
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  It
    // always has to be moved through a spill slot since there's no
    // quick way to pack the value into an SSE register.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(value, spill);
    __ volatile_move(spill, temp_double, T_LONG);
    __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
  } else {
    __ store(value, address, info);
  }
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  In
    // SSE0 and SSE1 mode it has to be moved through a spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
#ifndef _LP64
    if (UseSSE < 2) {
      // force the result through memory: below SSE2 there is no direct
      // xmm->cpu register move, so the value must go via a spill slot
      set_vreg_flag(result, must_start_in_memory);
    }
#endif // !LP64
  } else {
    __ load(address, result, info);
  }
}