/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif

void PhiResolverState::reset() {
  _virtual_operands.clear();
  _other_operands.clear();
  _vreg_table.clear();
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
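//
// Typical use (a sketch, mirroring move_to_phi() below): build a
// PhiResolver, record each required phi move, and let the destructor
// order and emit the moves:
//
//   PhiResolver resolver(this);
//   // for each phi input:
//   //   resolver.move(cur_opr, operand_for_instruction(phi));
//   // the moves are emitted, cycle-free, when resolver goes out of scope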

PhiResolver::PhiResolver(LIRGenerator* gen)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset();
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse assignment graph in depth first order and generate moves in post order
// i.e. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a start with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands().at(i);
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands().at(i);
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table().at_put(vreg_num, node);
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
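    // A constant keeps its constant operand on the instruction (see the
    // assert in set_result()), presumably so other blocks can still see
    // the value as a constant and load it locally; only this item's
    // _result is redirected to the new register.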
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached.
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop walk when encountering a root
  if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
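    // lir_cond_aboveEqual is an unsigned comparison, so a negative index
    // also branches to the stub (it compares as a huge unsigned value)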
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}



void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

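  // On a two-operand machine (e.g. x86) the result of a LIR op must be the
  // same register as its left input, so first materialize result = left and
  // then operate on result in place.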
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          jint c = right->as_jint();
          if (c > 0 && is_power_of_2(c)) {
            // do not need tmp here
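            // e.g. x * 8 becomes x << 3, since exact_log2(8) == 3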
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {

  if (TwoOperandLIRForm && value != result_op
      // Only 32bit right shifts require two operand form on S390.
      S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

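// Returns the declared type shared by all inputs of the phi, or NULL if
// the inputs disagree or the type is unknown.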
static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which end up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst_pos is zero, so assume
    // a non-overlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    Phi* cur_phi = cur_val->as_Phi();
    if (cur_phi != NULL && cur_phi->is_illegal()) {
      // Phi and local would need to get invalidated
      // (which is unexpected for Linear Scan).
      // But this case is very rare so we simply bail out.
      bailout("propagation of illegal phi");
      return;
    }
    LIR_Opr operand = cur_val->operand();
    if (operand->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      PhiResolver resolver(this);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   if (!phi->is_illegal()) { operand_for_instruction(phi); });

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr result = rlock_result(x, T_OBJECT);
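  // ON_WEAK_OOP_REF makes the GC's barrier set apply its reference-load
  // barrier (e.g. G1's SATB pre-barrier), keeping the referent alive.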
  access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
                 reference, LIR_OprFact::intConst(referent_offset), result);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
  // meaning of these two is mixed up (see JDK-8026837).
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
  // mirror = ((OopHandle)mirror)->resolve();
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
}

// java.lang.Class::isPrimitive()
void LIRGenerator::do_isPrimitive(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), temp, info);
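  // The klass field of a primitive mirror (e.g. int.class) is NULL, so
  // isPrimitive() is true exactly when the loaded klass is NULL.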
  __ cmp(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
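  // Search the block-local pool for an equal constant that is already in
  // a register; numeric constants are compared by their raw bits.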
1400 for (int i = 0; i < _constants.length(); i++) {
1401 LIR_Const* other = _constants.at(i);
1402 if (t == other->type()) {
1403 switch (t) {
1404 case T_INT:
1405 case T_FLOAT:
1406 if (c->as_jint_bits() != other->as_jint_bits()) continue;
1407 break;
1408 case T_LONG:
1409 case T_DOUBLE:
1410 if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1411 if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1412 break;
1413 case T_OBJECT:
1414 if (c->as_jobject() != other->as_jobject()) continue;
1415 break;
1416 default:
1417 break;
1418 }
1419 return _reg_for_constants.at(i);
1420 }
1421 }
1422
1423 LIR_Opr result = new_register(t);
1424 __ move((LIR_Opr)c, result);
1425 _constants.append(c);
1426 _reg_for_constants.append(result);
1427 return result;
1428 }
1429
1430 //------------------------field access--------------------------------------
1431
do_CompareAndSwap(Intrinsic * x,ValueType * type)1432 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
1433 assert(x->number_of_arguments() == 4, "wrong type");
1434 LIRItem obj (x->argument_at(0), this); // object
1435 LIRItem offset(x->argument_at(1), this); // offset of field
1436 LIRItem cmp (x->argument_at(2), this); // value to compare with field
1437 LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
1438 assert(obj.type()->tag() == objectTag, "invalid type");
1439
1440 // In 64bit the type can be long, sparc doesn't have this assert
1441 // assert(offset.type()->tag() == intTag, "invalid type");
1442
1443 assert(cmp.type()->tag() == type->tag(), "invalid type");
1444 assert(val.type()->tag() == type->tag(), "invalid type");
1445
1446 LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
1447 obj, offset, cmp, val);
1448 set_result(x, result);
1449 }
1450
1451 // Comment copied form templateTable_i486.cpp
1452 // ----------------------------------------------------------------------------
1453 // Volatile variables demand their effects be made known to all CPU's in
1454 // order. Store buffers on most chips allow reads & writes to reorder; the
1455 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1456 // memory barrier (i.e., it's not sufficient that the interpreter does not
1457 // reorder volatile references, the hardware also must not reorder them).
1458 //
1459 // According to the new Java Memory Model (JMM):
1460 // (1) All volatiles are serialized wrt to each other.
1461 // ALSO reads & writes act as aquire & release, so:
1462 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1463 // the read float up to before the read. It's OK for non-volatile memory refs
1464 // that happen before the volatile read to float down below it.
1465 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
1466 // that happen BEFORE the write float down to after the write. It's OK for
1467 // non-volatile memory refs that happen after the volatile write to float up
1468 // before it.
1469 //
1470 // We only put in barriers around volatile refs (they are expensive), not
1471 // _between_ memory refs (that would require us to track the flavor of the
1472 // previous memory refs). Requirements (2) and (3) require some barriers
1473 // before volatile stores and after volatile loads. These nearly cover
1474 // requirement (1) but miss the volatile-store-volatile-load case. This final
1475 // case is placed after volatile-stores although it could just as well go
1476 // before volatile-loads.
1477
1478
do_StoreField(StoreField * x)1479 void LIRGenerator::do_StoreField(StoreField* x) {
1480 bool needs_patching = x->needs_patching();
1481 bool is_volatile = x->field()->is_volatile();
1482 BasicType field_type = x->field_type();
1483
1484 CodeEmitInfo* info = NULL;
1485 if (needs_patching) {
1486 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1487 info = state_for(x, x->state_before());
1488 } else if (x->needs_null_check()) {
1489 NullCheck* nc = x->explicit_null_check();
1490 if (nc == NULL) {
1491 info = state_for(x);
1492 } else {
1493 info = state_for(nc);
1494 }
1495 }
1496
1497 LIRItem object(x->obj(), this);
1498 LIRItem value(x->value(), this);
1499
1500 object.load_item();
1501
1502 if (is_volatile || needs_patching) {
1503 // load item if field is volatile (fewer special cases for volatiles)
1504 // load item if field not initialized
1505 // load item if field not constant
1506 // because of code patching we cannot inline constants
1507 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1508 value.load_byte_item();
1509 } else {
1510 value.load_item();
1511 }
1512 } else {
1513 value.load_for_store(field_type);
1514 }
1515
1516 set_no_result(x);
1517
1518 #ifndef PRODUCT
1519 if (PrintNotLoaded && needs_patching) {
1520 tty->print_cr(" ###class not loaded at store_%s bci %d",
1521 x->is_static() ? "static" : "field", x->printable_bci());
1522 }
1523 #endif
1524
1525 if (x->needs_null_check() &&
1526 (needs_patching ||
1527 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1528 // Emit an explicit null check because the offset is too large.
1529 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1530 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1531 __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1532 }
1533
1534 DecoratorSet decorators = IN_HEAP;
1535 if (is_volatile) {
1536 decorators |= MO_SEQ_CST;
1537 }
1538 if (needs_patching) {
1539 decorators |= C1_NEEDS_PATCHING;
1540 }
1541
1542 access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1543 value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1544 }
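// For example, a store to a volatile instance field such as
//
//   class C { volatile int f; }   // hypothetical Java source
//   c.f = 42;
//
// reaches access_store_at() above with decorators == IN_HEAP | MO_SEQ_CST,
// plus C1_NEEDS_PATCHING if class C has not been loaded yet.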
1545
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1547 assert(x->is_pinned(),"");
1548 bool needs_range_check = x->compute_needs_range_check();
1549 bool use_length = x->length() != NULL;
1550 bool obj_store = is_reference_type(x->elt_type());
1551 bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
1552 !get_jobject_constant(x->value())->is_null_object() ||
1553 x->should_profile());
1554
1555 LIRItem array(x->array(), this);
1556 LIRItem index(x->index(), this);
1557 LIRItem value(x->value(), this);
1558 LIRItem length(this);
1559
1560 array.load_item();
1561 index.load_nonconstant();
1562
1563 if (use_length && needs_range_check) {
1564 length.set_instruction(x->length());
1565 length.load_item();
1566
1567 }
1568 if (needs_store_check || x->check_boolean()) {
1569 value.load_item();
1570 } else {
1571 value.load_for_store(x->elt_type());
1572 }
1573
1574 set_no_result(x);
1575
1576 // the CodeEmitInfo must be duplicated for each different
1577 // LIR-instruction because spilling can occur anywhere between two
1578 // instructions and so the debug information must be different
1579 CodeEmitInfo* range_check_info = state_for(x);
1580 CodeEmitInfo* null_check_info = NULL;
1581 if (x->needs_null_check()) {
1582 null_check_info = new CodeEmitInfo(range_check_info);
1583 }
1584
1585 if (GenerateRangeChecks && needs_range_check) {
1586 if (use_length) {
1587 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1588 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1589 } else {
1590 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1591 // range_check also does the null check
1592 null_check_info = NULL;
1593 }
1594 }
1595
1596 if (GenerateArrayStoreCheck && needs_store_check) {
1597 CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1598 array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1599 }
1600
1601 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1602 if (x->check_boolean()) {
1603 decorators |= C1_MASK_BOOLEAN;
1604 }
1605
1606 access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1607 NULL, null_check_info);
1608 }
1609
void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                  LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                  CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1613 decorators |= ACCESS_READ;
1614 LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1615 if (access.is_raw()) {
1616 _barrier_set->BarrierSetC1::load_at(access, result);
1617 } else {
1618 _barrier_set->load_at(access, result);
1619 }
1620 }
1621
void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
                               LIR_Opr addr, LIR_Opr result) {
1624 decorators |= ACCESS_READ;
1625 LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1626 access.set_resolved_addr(addr);
1627 if (access.is_raw()) {
1628 _barrier_set->BarrierSetC1::load(access, result);
1629 } else {
1630 _barrier_set->load(access, result);
1631 }
1632 }
1633
void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
                                   LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                   CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
1637 decorators |= ACCESS_WRITE;
1638 LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
1639 if (access.is_raw()) {
1640 _barrier_set->BarrierSetC1::store_at(access, value);
1641 } else {
1642 _barrier_set->store_at(access, value);
1643 }
1644 }
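// Note on the is_raw() dispatch used in the accessors above: a qualified
// call through a pointer, p->Base::f(), is statically bound and bypasses the
// vtable, so raw accesses always run the plain BarrierSetC1 code while other
// accesses reach the GC-specific override (G1, etc.). A minimal stand-alone
// C++ sketch of the same pattern, with hypothetical names:
//
//   struct Base { virtual void f() { /* plain access */ } };
//   struct G1   : Base { void f() override { /* access + GC barriers */ } };
//   Base* p = ...;
//   p->f();        // virtual dispatch, may run G1::f
//   p->Base::f();  // static dispatch, always runs Base::f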
1645
LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
                                               LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
1648 decorators |= ACCESS_READ;
1649 decorators |= ACCESS_WRITE;
1650 // Atomic operations are SEQ_CST by default
1651 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1652 LIRAccess access(this, decorators, base, offset, type);
1653 if (access.is_raw()) {
1654 return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
1655 } else {
1656 return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
1657 }
1658 }
1659
LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
1662 decorators |= ACCESS_READ;
1663 decorators |= ACCESS_WRITE;
1664 // Atomic operations are SEQ_CST by default
1665 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1666 LIRAccess access(this, decorators, base, offset, type);
1667 if (access.is_raw()) {
1668 return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
1669 } else {
1670 return _barrier_set->atomic_xchg_at(access, value);
1671 }
1672 }
1673
LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
                                           LIRItem& base, LIRItem& offset, LIRItem& value) {
1676 decorators |= ACCESS_READ;
1677 decorators |= ACCESS_WRITE;
1678 // Atomic operations are SEQ_CST by default
1679 decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
1680 LIRAccess access(this, decorators, base, offset, type);
1681 if (access.is_raw()) {
1682 return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
1683 } else {
1684 return _barrier_set->atomic_add_at(access, value);
1685 }
1686 }
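// The MO_DECORATOR_MASK test in the three atomic helpers above only supplies
// MO_SEQ_CST when the caller passed no memory-ordering decorator at all.
// Hedged illustration:
//
//   DecoratorSet d1 = IN_HEAP;               // becomes IN_HEAP | MO_SEQ_CST
//   DecoratorSet d2 = IN_HEAP | MO_RELAXED;  // kept as-is, caller chose order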
1687
LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
1689 // Use stronger ACCESS_WRITE|ACCESS_READ by default.
1690 if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
1691 decorators |= ACCESS_READ | ACCESS_WRITE;
1692 }
1693
1694 return _barrier_set->resolve(this, decorators, obj);
1695 }
1696
void LIRGenerator::do_LoadField(LoadField* x) {
1698 bool needs_patching = x->needs_patching();
1699 bool is_volatile = x->field()->is_volatile();
1700 BasicType field_type = x->field_type();
1701
1702 CodeEmitInfo* info = NULL;
1703 if (needs_patching) {
1704 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1705 info = state_for(x, x->state_before());
1706 } else if (x->needs_null_check()) {
1707 NullCheck* nc = x->explicit_null_check();
1708 if (nc == NULL) {
1709 info = state_for(x);
1710 } else {
1711 info = state_for(nc);
1712 }
1713 }
1714
1715 LIRItem object(x->obj(), this);
1716
1717 object.load_item();
1718
1719 #ifndef PRODUCT
1720 if (PrintNotLoaded && needs_patching) {
1721 tty->print_cr(" ###class not loaded at load_%s bci %d",
1722 x->is_static() ? "static" : "field", x->printable_bci());
1723 }
1724 #endif
1725
1726 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1727 if (x->needs_null_check() &&
1728 (needs_patching ||
1729 MacroAssembler::needs_explicit_null_check(x->offset()) ||
1730 stress_deopt)) {
1731 LIR_Opr obj = object.result();
1732 if (stress_deopt) {
1733 obj = new_register(T_OBJECT);
1734 __ move(LIR_OprFact::oopConst(NULL), obj);
1735 }
1736 // Emit an explicit null check because the offset is too large.
1737 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1738 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1739 __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1740 }
1741
1742 DecoratorSet decorators = IN_HEAP;
1743 if (is_volatile) {
1744 decorators |= MO_SEQ_CST;
1745 }
1746 if (needs_patching) {
1747 decorators |= C1_NEEDS_PATCHING;
1748 }
1749
1750 LIR_Opr result = rlock_result(x, field_type);
1751 access_load_at(decorators, field_type,
1752 object, LIR_OprFact::intConst(x->offset()), result,
1753 info ? new CodeEmitInfo(info) : NULL, info);
1754 }
1755
1756
1757 //------------------------java.nio.Buffer.checkIndex------------------------
1758
1759 // int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1761 // NOTE: by the time we are in checkIndex() we are guaranteed that
1762 // the buffer is non-null (because checkIndex is package-private and
1763 // only called from within other methods in the buffer).
1764 assert(x->number_of_arguments() == 2, "wrong type");
1765 LIRItem buf (x->argument_at(0), this);
1766 LIRItem index(x->argument_at(1), this);
1767 buf.load_item();
1768 index.load_item();
1769
1770 LIR_Opr result = rlock_result(x);
1771 if (GenerateRangeChecks) {
1772 CodeEmitInfo* info = state_for(x);
1773 CodeStub* stub = new RangeCheckStub(info, index.result());
1774 LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
1775 if (index.result()->is_constant()) {
1776 cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1777 __ branch(lir_cond_belowEqual, T_INT, stub);
1778 } else {
1779 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
1780 java_nio_Buffer::limit_offset(), T_INT, info);
1781 __ branch(lir_cond_aboveEqual, T_INT, stub);
1782 }
1783 __ move(index.result(), result);
1784 } else {
1785 // Just load the index into the result register
1786 __ move(index.result(), result);
1787 }
1788 }
1789
1790
1791 //------------------------array access--------------------------------------
1792
1793
void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1795 LIRItem array(x->array(), this);
1796 array.load_item();
1797 LIR_Opr reg = rlock_result(x);
1798
1799 CodeEmitInfo* info = NULL;
1800 if (x->needs_null_check()) {
1801 NullCheck* nc = x->explicit_null_check();
1802 if (nc == NULL) {
1803 info = state_for(x);
1804 } else {
1805 info = state_for(nc);
1806 }
1807 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1808 LIR_Opr obj = new_register(T_OBJECT);
1809 __ move(LIR_OprFact::oopConst(NULL), obj);
1810 __ null_check(obj, new CodeEmitInfo(info));
1811 }
1812 }
1813 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1814 }
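// Note: when info is non-NULL the length load above doubles as an implicit
// null check -- e.g. for "int n = a.length;" the emitted LIR is roughly
// (hedged sketch):
//
//   load [a + arrayOopDesc::length_offset_in_bytes()] -> n
//
// A NULL array faults on the access, and the attached CodeEmitInfo lets the
// runtime map the fault back to a Java NullPointerException at the right bci.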
1815
1816
void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1818 bool use_length = x->length() != NULL;
1819 LIRItem array(x->array(), this);
1820 LIRItem index(x->index(), this);
1821 LIRItem length(this);
1822 bool needs_range_check = x->compute_needs_range_check();
1823
1824 if (use_length && needs_range_check) {
1825 length.set_instruction(x->length());
1826 length.load_item();
1827 }
1828
1829 array.load_item();
1830 if (index.is_constant() && can_inline_as_constant(x->index())) {
1831 // let it be a constant
1832 index.dont_load_item();
1833 } else {
1834 index.load_item();
1835 }
1836
1837 CodeEmitInfo* range_check_info = state_for(x);
1838 CodeEmitInfo* null_check_info = NULL;
1839 if (x->needs_null_check()) {
1840 NullCheck* nc = x->explicit_null_check();
1841 if (nc != NULL) {
1842 null_check_info = state_for(nc);
1843 } else {
1844 null_check_info = range_check_info;
1845 }
1846 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1847 LIR_Opr obj = new_register(T_OBJECT);
1848 __ move(LIR_OprFact::oopConst(NULL), obj);
1849 __ null_check(obj, new CodeEmitInfo(null_check_info));
1850 }
1851 }
1852
1853 if (GenerateRangeChecks && needs_range_check) {
1854 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1855 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
1856 } else if (use_length) {
1857 // TODO: use a (modified) version of array_range_check that does not require a
1858 // constant length to be loaded to a register
1859 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1860 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1861 } else {
1862 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1863 // The range check performs the null check, so clear it out for the load
1864 null_check_info = NULL;
1865 }
1866 }
1867
1868 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1869
1870 LIR_Opr result = rlock_result(x, x->elt_type());
1871 access_load_at(decorators, x->elt_type(),
1872 array, index.result(), result,
1873 NULL, null_check_info);
1874 }
1875
1876
void LIRGenerator::do_NullCheck(NullCheck* x) {
1878 if (x->can_trap()) {
1879 LIRItem value(x->obj(), this);
1880 value.load_item();
1881 CodeEmitInfo* info = state_for(x);
1882 __ null_check(value.result(), info);
1883 }
1884 }
1885
1886
void LIRGenerator::do_TypeCast(TypeCast* x) {
1888 LIRItem value(x->obj(), this);
1889 value.load_item();
1890 // the result is the same as from the node we are casting
1891 set_result(x, value.result());
1892 }
1893
1894
void LIRGenerator::do_Throw(Throw* x) {
1896 LIRItem exception(x->exception(), this);
1897 exception.load_item();
1898 set_no_result(x);
1899 LIR_Opr exception_opr = exception.result();
1900 CodeEmitInfo* info = state_for(x, x->state());
1901
1902 #ifndef PRODUCT
1903 if (PrintC1Statistics) {
1904 increment_counter(Runtime1::throw_count_address(), T_INT);
1905 }
1906 #endif
1907
1908 // check if the instruction has an xhandler in any of the nested scopes
1909 bool unwind = false;
1910 if (info->exception_handlers()->length() == 0) {
1911 // this throw is not inside an xhandler
1912 unwind = true;
1913 } else {
1914 // get some idea of the throw type
1915 bool type_is_exact = true;
1916 ciType* throw_type = x->exception()->exact_type();
1917 if (throw_type == NULL) {
1918 type_is_exact = false;
1919 throw_type = x->exception()->declared_type();
1920 }
1921 if (throw_type != NULL && throw_type->is_instance_klass()) {
1922 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1923 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1924 }
1925 }
1926
1927 // do null check before moving exception oop into fixed register
1928 // to avoid a fixed interval with an oop during the null check.
1929 // Use a copy of the CodeEmitInfo because debug information is
1930 // different for null_check and throw.
1931 if (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL) {
1932 // if the exception object wasn't created using new then it might be null.
1933 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
1934 }
1935
1936 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
1937 // we need to go through the exception lookup path to get JVMTI
1938 // notification done
1939 unwind = false;
1940 }
1941
1942 // move exception oop into fixed register
1943 __ move(exception_opr, exceptionOopOpr());
1944
1945 if (unwind) {
1946 __ unwind_exception(exceptionOopOpr());
1947 } else {
1948 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
1949 }
1950 }
1951
1952
void LIRGenerator::do_RoundFP(RoundFP* x) {
1954 LIRItem input(x->input(), this);
1955 input.load_item();
1956 LIR_Opr input_opr = input.result();
1957 assert(input_opr->is_register(), "why round if value is not in a register?");
1958 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
1959 if (input_opr->is_single_fpu()) {
1960 set_result(x, round_item(input_opr)); // This code path not currently taken
1961 } else {
1962 LIR_Opr result = new_register(T_DOUBLE);
1963 set_vreg_flag(result, must_start_in_memory);
1964 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
1965 set_result(x, result);
1966 }
1967 }
1968
// Here UnsafeGetRaw may have x->base() and x->index() be int or long
// on both 32-bit and 64-bit platforms; x->base() is expected to always be long on 64-bit.
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
1972 LIRItem base(x->base(), this);
1973 LIRItem idx(this);
1974
1975 base.load_item();
1976 if (x->has_index()) {
1977 idx.set_instruction(x->index());
1978 idx.load_nonconstant();
1979 }
1980
1981 LIR_Opr reg = rlock_result(x, x->basic_type());
1982
1983 int log2_scale = 0;
1984 if (x->has_index()) {
1985 log2_scale = x->log2_scale();
1986 }
1987
1988 assert(!x->has_index() || idx.value() == x->index(), "should match");
1989
1990 LIR_Opr base_op = base.result();
1991 LIR_Opr index_op = idx.result();
1992 #ifndef _LP64
1993 if (base_op->type() == T_LONG) {
1994 base_op = new_register(T_INT);
1995 __ convert(Bytecodes::_l2i, base.result(), base_op);
1996 }
1997 if (x->has_index()) {
1998 if (index_op->type() == T_LONG) {
1999 LIR_Opr long_index_op = index_op;
2000 if (index_op->is_constant()) {
2001 long_index_op = new_register(T_LONG);
2002 __ move(index_op, long_index_op);
2003 }
2004 index_op = new_register(T_INT);
2005 __ convert(Bytecodes::_l2i, long_index_op, index_op);
2006 } else {
2007 assert(x->index()->type()->tag() == intTag, "must be");
2008 }
2009 }
  // At this point base and index should both be ints.
  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2012 assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2013 #else
2014 if (x->has_index()) {
2015 if (index_op->type() == T_INT) {
2016 if (!index_op->is_constant()) {
2017 index_op = new_register(T_LONG);
2018 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2019 }
2020 } else {
2021 assert(index_op->type() == T_LONG, "must be");
2022 if (index_op->is_constant()) {
2023 index_op = new_register(T_LONG);
2024 __ move(idx.result(), index_op);
2025 }
2026 }
2027 }
  // At this point base is a non-constant long.
  // Index is a long register or an int constant.
2030 // We allow the constant to stay an int because that would allow us a more compact encoding by
2031 // embedding an immediate offset in the address expression. If we have a long constant, we have to
2032 // move it into a register first.
2033 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2034 assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2035 (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2036 #endif
2037
2038 BasicType dst_type = x->basic_type();
2039
2040 LIR_Address* addr;
2041 if (index_op->is_constant()) {
2042 assert(log2_scale == 0, "must not have a scale");
2043 assert(index_op->type() == T_INT, "only int constants supported");
2044 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2045 } else {
2046 #ifdef X86
2047 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2048 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2049 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2050 #else
2051 if (index_op->is_illegal() || log2_scale == 0) {
2052 addr = new LIR_Address(base_op, index_op, dst_type);
2053 } else {
2054 LIR_Opr tmp = new_pointer_register();
2055 __ shift_left(index_op, log2_scale, tmp);
2056 addr = new LIR_Address(base_op, tmp, dst_type);
2057 }
2058 #endif
2059 }
2060
2061 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2062 __ unaligned_move(addr, reg);
2063 } else {
2064 if (dst_type == T_OBJECT && x->is_wide()) {
2065 __ move_wide(addr, reg);
2066 } else {
2067 __ move(addr, reg);
2068 }
2069 }
2070 }
2071
2072
void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2074 int log2_scale = 0;
2075 BasicType type = x->basic_type();
2076
2077 if (x->has_index()) {
2078 log2_scale = x->log2_scale();
2079 }
2080
2081 LIRItem base(x->base(), this);
2082 LIRItem value(x->value(), this);
2083 LIRItem idx(this);
2084
2085 base.load_item();
2086 if (x->has_index()) {
2087 idx.set_instruction(x->index());
2088 idx.load_item();
2089 }
2090
2091 if (type == T_BYTE || type == T_BOOLEAN) {
2092 value.load_byte_item();
2093 } else {
2094 value.load_item();
2095 }
2096
2097 set_no_result(x);
2098
2099 LIR_Opr base_op = base.result();
2100 LIR_Opr index_op = idx.result();
2101
2102 #ifdef GENERATE_ADDRESS_IS_PREFERRED
2103 LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2104 #else
2105 #ifndef _LP64
2106 if (base_op->type() == T_LONG) {
2107 base_op = new_register(T_INT);
2108 __ convert(Bytecodes::_l2i, base.result(), base_op);
2109 }
2110 if (x->has_index()) {
2111 if (index_op->type() == T_LONG) {
2112 index_op = new_register(T_INT);
2113 __ convert(Bytecodes::_l2i, idx.result(), index_op);
2114 }
2115 }
  // At this point base and index should both be non-constant ints.
  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
  assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
2119 #else
2120 if (x->has_index()) {
2121 if (index_op->type() == T_INT) {
2122 index_op = new_register(T_LONG);
2123 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2124 }
2125 }
2126 // At this point base and index are long and non-constant
2127 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2128 assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2129 #endif
2130
2131 if (log2_scale != 0) {
2132 // temporary fix (platform dependent code without shift on Intel would be better)
2133 // TODO: ARM also allows embedded shift in the address
2134 LIR_Opr tmp = new_pointer_register();
2135 if (TwoOperandLIRForm) {
2136 __ move(index_op, tmp);
2137 index_op = tmp;
2138 }
2139 __ shift_left(index_op, log2_scale, tmp);
2140 if (!TwoOperandLIRForm) {
2141 index_op = tmp;
2142 }
2143 }
2144
2145 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2146 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2147 __ move(value.result(), addr);
2148 }
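// Note on the TwoOperandLIRForm handling above (a sketch, assuming an
// x86-style two-operand target where the destination must equal the first
// source):
//
//   TwoOperandLIRForm:   move(index, tmp); shift_left(tmp, scale, tmp);
//   three-operand form:  shift_left(index, scale, tmp); index = tmp;
//
// Either way index_op ends up holding index << log2_scale.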
2149
2150
void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2152 BasicType type = x->basic_type();
2153 LIRItem src(x->object(), this);
2154 LIRItem off(x->offset(), this);
2155
2156 off.load_item();
2157 src.load_item();
2158
2159 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2160
2161 if (x->is_volatile()) {
2162 decorators |= MO_SEQ_CST;
2163 }
2164 if (type == T_BOOLEAN) {
2165 decorators |= C1_MASK_BOOLEAN;
2166 }
2167 if (is_reference_type(type)) {
2168 decorators |= ON_UNKNOWN_OOP_REF;
2169 }
2170
2171 LIR_Opr result = rlock_result(x, type);
2172 access_load_at(decorators, type,
2173 src, off.result(), result);
2174 }
2175
2176
void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2178 BasicType type = x->basic_type();
2179 LIRItem src(x->object(), this);
2180 LIRItem off(x->offset(), this);
2181 LIRItem data(x->value(), this);
2182
2183 src.load_item();
2184 if (type == T_BOOLEAN || type == T_BYTE) {
2185 data.load_byte_item();
2186 } else {
2187 data.load_item();
2188 }
2189 off.load_item();
2190
2191 set_no_result(x);
2192
2193 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
2194 if (is_reference_type(type)) {
2195 decorators |= ON_UNKNOWN_OOP_REF;
2196 }
2197 if (x->is_volatile()) {
2198 decorators |= MO_SEQ_CST;
2199 }
2200 access_store_at(decorators, type, src, off.result(), data.result());
2201 }
2202
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
2204 BasicType type = x->basic_type();
2205 LIRItem src(x->object(), this);
2206 LIRItem off(x->offset(), this);
2207 LIRItem value(x->value(), this);
2208
2209 DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;
2210
2211 if (is_reference_type(type)) {
2212 decorators |= ON_UNKNOWN_OOP_REF;
2213 }
2214
2215 LIR_Opr result;
2216 if (x->is_add()) {
2217 result = access_atomic_add_at(decorators, type, src, off, value);
2218 } else {
2219 result = access_atomic_xchg_at(decorators, type, src, off, value);
2220 }
2221 set_result(x, result);
2222 }
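// For example (hedged mapping): Unsafe.getAndAddInt(o, off, 1) arrives here
// with x->is_add() == true and is lowered via access_atomic_add_at(), while
// Unsafe.getAndSetObject(o, off, v) takes the xchg path with
// ON_UNKNOWN_OOP_REF set so the GC barrier set can wrap the exchange with
// the required pre/post barriers.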
2223
void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2225 int lng = x->length();
2226
2227 for (int i = 0; i < lng; i++) {
2228 SwitchRange* one_range = x->at(i);
2229 int low_key = one_range->low_key();
2230 int high_key = one_range->high_key();
2231 BlockBegin* dest = one_range->sux();
2232 if (low_key == high_key) {
2233 __ cmp(lir_cond_equal, value, low_key);
2234 __ branch(lir_cond_equal, T_INT, dest);
2235 } else if (high_key - low_key == 1) {
2236 __ cmp(lir_cond_equal, value, low_key);
2237 __ branch(lir_cond_equal, T_INT, dest);
2238 __ cmp(lir_cond_equal, value, high_key);
2239 __ branch(lir_cond_equal, T_INT, dest);
2240 } else {
2241 LabelObj* L = new LabelObj();
2242 __ cmp(lir_cond_less, value, low_key);
2243 __ branch(lir_cond_less, T_INT, L->label());
2244 __ cmp(lir_cond_lessEqual, value, high_key);
2245 __ branch(lir_cond_lessEqual, T_INT, dest);
2246 __ branch_destination(L->label());
2247 }
2248 }
2249 __ jump(default_sux);
2250 }
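// Illustrative sketch of the compare chain emitted above for one range
// [low, high] spanning more than two keys (label name hypothetical):
//
//   cmp value, low
//   branch if less        -> L        // below the range, try the next one
//   cmp value, high
//   branch if lessEqual   -> dest     // low <= value <= high
//   L: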
2251
2252
SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2254 SwitchRangeList* res = new SwitchRangeList();
2255 int len = x->length();
2256 if (len > 0) {
2257 BlockBegin* sux = x->sux_at(0);
2258 int key = x->lo_key();
2259 BlockBegin* default_sux = x->default_sux();
2260 SwitchRange* range = new SwitchRange(key, sux);
2261 for (int i = 0; i < len; i++, key++) {
2262 BlockBegin* new_sux = x->sux_at(i);
2263 if (sux == new_sux) {
2264 // still in same range
2265 range->set_high_key(key);
2266 } else {
2267 // skip tests which explicitly dispatch to the default
2268 if (sux != default_sux) {
2269 res->append(range);
2270 }
2271 range = new SwitchRange(key, new_sux);
2272 }
2273 sux = new_sux;
2274 }
2275 if (res->length() == 0 || res->last() != range) res->append(range);
2276 }
2277 return res;
2278 }
2279
2280
2281 // we expect the keys to be sorted by increasing value
SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2283 SwitchRangeList* res = new SwitchRangeList();
2284 int len = x->length();
2285 if (len > 0) {
2286 BlockBegin* default_sux = x->default_sux();
2287 int key = x->key_at(0);
2288 BlockBegin* sux = x->sux_at(0);
2289 SwitchRange* range = new SwitchRange(key, sux);
2290 for (int i = 1; i < len; i++) {
2291 int new_key = x->key_at(i);
2292 BlockBegin* new_sux = x->sux_at(i);
2293 if (key+1 == new_key && sux == new_sux) {
2294 // still in same range
2295 range->set_high_key(new_key);
2296 } else {
2297 // skip tests which explicitly dispatch to the default
2298 if (range->sux() != default_sux) {
2299 res->append(range);
2300 }
2301 range = new SwitchRange(new_key, new_sux);
2302 }
2303 key = new_key;
2304 sux = new_sux;
2305 }
2306 if (res->length() == 0 || res->last() != range) res->append(range);
2307 }
2308 return res;
2309 }
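// For example (hedged), a lookupswitch with keys/successors
//   { 1 -> B1, 2 -> B1, 3 -> B1, 10 -> B2 }
// collapses to the two ranges [1,3] -> B1 and [10,10] -> B2, and any range
// whose successor is the default block is dropped, since falling through to
// the trailing jump handles it.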
2310
2311
void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2313 LIRItem tag(x->tag(), this);
2314 tag.load_item();
2315 set_no_result(x);
2316
2317 if (x->is_safepoint()) {
2318 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2319 }
2320
2321 // move values into phi locations
2322 move_to_phi(x->state());
2323
2324 int lo_key = x->lo_key();
2325 int len = x->length();
2326 assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
2327 LIR_Opr value = tag.result();
2328
2329 if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2330 ciMethod* method = x->state()->scope()->method();
2331 ciMethodData* md = method->method_data_or_null();
2332 assert(md != NULL, "Sanity");
2333 ciProfileData* data = md->bci_to_data(x->state()->bci());
2334 assert(data != NULL, "must have profiling data");
2335 assert(data->is_MultiBranchData(), "bad profile data?");
2336 int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2337 LIR_Opr md_reg = new_register(T_METADATA);
2338 __ metadata2reg(md->constant_encoding(), md_reg);
2339 LIR_Opr data_offset_reg = new_pointer_register();
2340 LIR_Opr tmp_reg = new_pointer_register();
2341
2342 __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2343 for (int i = 0; i < len; i++) {
2344 int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2345 __ cmp(lir_cond_equal, value, i + lo_key);
2346 __ move(data_offset_reg, tmp_reg);
2347 __ cmove(lir_cond_equal,
2348 LIR_OprFact::intptrConst(count_offset),
2349 tmp_reg,
2350 data_offset_reg, T_INT);
2351 }
2352
2353 LIR_Opr data_reg = new_pointer_register();
2354 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2355 __ move(data_addr, data_reg);
2356 __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2357 __ move(data_reg, data_addr);
2358 }
2359
2360 if (UseTableRanges) {
2361 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2362 } else {
2363 for (int i = 0; i < len; i++) {
2364 __ cmp(lir_cond_equal, value, i + lo_key);
2365 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2366 }
2367 __ jump(x->default_sux());
2368 }
2369 }
2370
2371
void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2373 LIRItem tag(x->tag(), this);
2374 tag.load_item();
2375 set_no_result(x);
2376
2377 if (x->is_safepoint()) {
2378 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2379 }
2380
2381 // move values into phi locations
2382 move_to_phi(x->state());
2383
2384 LIR_Opr value = tag.result();
2385 int len = x->length();
2386
2387 if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
2388 ciMethod* method = x->state()->scope()->method();
2389 ciMethodData* md = method->method_data_or_null();
2390 assert(md != NULL, "Sanity");
2391 ciProfileData* data = md->bci_to_data(x->state()->bci());
2392 assert(data != NULL, "must have profiling data");
2393 assert(data->is_MultiBranchData(), "bad profile data?");
2394 int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
2395 LIR_Opr md_reg = new_register(T_METADATA);
2396 __ metadata2reg(md->constant_encoding(), md_reg);
2397 LIR_Opr data_offset_reg = new_pointer_register();
2398 LIR_Opr tmp_reg = new_pointer_register();
2399
2400 __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
2401 for (int i = 0; i < len; i++) {
2402 int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
2403 __ cmp(lir_cond_equal, value, x->key_at(i));
2404 __ move(data_offset_reg, tmp_reg);
2405 __ cmove(lir_cond_equal,
2406 LIR_OprFact::intptrConst(count_offset),
2407 tmp_reg,
2408 data_offset_reg, T_INT);
2409 }
2410
2411 LIR_Opr data_reg = new_pointer_register();
2412 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
2413 __ move(data_addr, data_reg);
2414 __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
2415 __ move(data_reg, data_addr);
2416 }
2417
2418 if (UseTableRanges) {
2419 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2420 } else {
2421 int len = x->length();
2422 for (int i = 0; i < len; i++) {
2423 __ cmp(lir_cond_equal, value, x->key_at(i));
2424 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2425 }
2426 __ jump(x->default_sux());
2427 }
2428 }
2429
2430
void LIRGenerator::do_Goto(Goto* x) {
2432 set_no_result(x);
2433
2434 if (block()->next()->as_OsrEntry()) {
2435 // need to free up storage used for OSR entry point
2436 LIR_Opr osrBuffer = block()->next()->operand();
2437 BasicTypeList signature;
2438 signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2439 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2440 __ move(osrBuffer, cc->args()->at(0));
2441 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2442 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2443 }
2444
2445 if (x->is_safepoint()) {
2446 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2447
2448 // increment backedge counter if needed
2449 CodeEmitInfo* info = state_for(x, state);
2450 increment_backedge_counter(info, x->profiled_bci());
2451 CodeEmitInfo* safepoint_info = state_for(x, state);
2452 __ safepoint(safepoint_poll_register(), safepoint_info);
2453 }
2454
  // Gotos can be folded Ifs; handle this case.
2456 if (x->should_profile()) {
2457 ciMethod* method = x->profiled_method();
2458 assert(method != NULL, "method should be set if branch is profiled");
2459 ciMethodData* md = method->method_data_or_null();
2460 assert(md != NULL, "Sanity");
2461 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2462 assert(data != NULL, "must have profiling data");
2463 int offset;
2464 if (x->direction() == Goto::taken) {
2465 assert(data->is_BranchData(), "need BranchData for two-way branches");
2466 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2467 } else if (x->direction() == Goto::not_taken) {
2468 assert(data->is_BranchData(), "need BranchData for two-way branches");
2469 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2470 } else {
2471 assert(data->is_JumpData(), "need JumpData for branches");
2472 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2473 }
2474 LIR_Opr md_reg = new_register(T_METADATA);
2475 __ metadata2reg(md->constant_encoding(), md_reg);
2476
2477 increment_counter(new LIR_Address(md_reg, offset,
2478 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2479 }
2480
  // Emit the phi-instruction moves after the safepoint since this simplifies
  // describing the state at the safepoint.
2483 move_to_phi(x->state());
2484
2485 __ jump(x->default_sux());
2486 }
2487
2488 /**
2489 * Emit profiling code if needed for arguments, parameters, return value types
2490 *
2491 * @param md MDO the code will update at runtime
2492 * @param md_base_offset common offset in the MDO for this profile and subsequent ones
2493 * @param md_offset offset in the MDO (on top of md_base_offset) for this profile
2494 * @param profiled_k current profile
2495 * @param obj IR node for the object to be profiled
2496 * @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
2497 * Set once we find an update to make and use for next ones.
2498 * @param not_null true if we know obj cannot be null
 * @param signature_at_call_k signature at the call site for obj
 * @param callee_signature_k signature of the callee for obj; the call-site
 *                           and callee signatures differ at method handle calls
2502 * @return the only klass we know will ever be seen at this profile point
2503 */
ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
                                    Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
                                    ciKlass* callee_signature_k) {
2507 ciKlass* result = NULL;
2508 bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2509 bool do_update = !TypeEntries::is_type_unknown(profiled_k);
  // Known not to be null (or the null bit already seen) and the type already
  // set to unknown: nothing we can do to improve profiling.
2512 if (!do_null && !do_update) {
2513 return result;
2514 }
2515
2516 ciKlass* exact_klass = NULL;
2517 Compilation* comp = Compilation::current();
2518 if (do_update) {
2519 // try to find exact type, using CHA if possible, so that loading
2520 // the klass from the object can be avoided
2521 ciType* type = obj->exact_type();
2522 if (type == NULL) {
2523 type = obj->declared_type();
2524 type = comp->cha_exact_type(type);
2525 }
2526 assert(type == NULL || type->is_klass(), "type should be class");
2527 exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2528
2529 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2530 }
2531
2532 if (!do_null && !do_update) {
2533 return result;
2534 }
2535
2536 ciKlass* exact_signature_k = NULL;
2537 if (do_update) {
2538 // Is the type from the signature exact (the only one possible)?
2539 exact_signature_k = signature_at_call_k->exact_klass();
2540 if (exact_signature_k == NULL) {
2541 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2542 } else {
2543 result = exact_signature_k;
2544 // Known statically. No need to emit any code: prevent
2545 // LIR_Assembler::emit_profile_type() from emitting useless code
2546 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2547 }
    // exact_klass and exact_signature_k can both be non-NULL but
    // different if exact_klass is loaded after the ciObject for
    // exact_signature_k is created.
2551 if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2552 // sometimes the type of the signature is better than the best type
2553 // the compiler has
2554 exact_klass = exact_signature_k;
2555 }
2556 if (callee_signature_k != NULL &&
2557 callee_signature_k != signature_at_call_k) {
2558 ciKlass* improved_klass = callee_signature_k->exact_klass();
2559 if (improved_klass == NULL) {
2560 improved_klass = comp->cha_exact_type(callee_signature_k);
2561 }
2562 if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2563 exact_klass = exact_signature_k;
2564 }
2565 }
2566 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2567 }
2568
2569 if (!do_null && !do_update) {
2570 return result;
2571 }
2572
2573 if (mdp == LIR_OprFact::illegalOpr) {
2574 mdp = new_register(T_METADATA);
2575 __ metadata2reg(md->constant_encoding(), mdp);
2576 if (md_base_offset != 0) {
2577 LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2578 mdp = new_pointer_register();
2579 __ leal(LIR_OprFact::address(base_type_address), mdp);
2580 }
2581 }
2582 LIRItem value(obj, this);
2583 value.load_item();
2584 __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2585 value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2586 return result;
2587 }
2588
2589 // profile parameters on entry to the root of the compilation
void LIRGenerator::profile_parameters(Base* x) {
2591 if (compilation()->profile_parameters()) {
2592 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2593 ciMethodData* md = scope()->method()->method_data_or_null();
2594 assert(md != NULL, "Sanity");
2595
2596 if (md->parameters_type_data() != NULL) {
2597 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2598 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
2599 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2600 for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2601 LIR_Opr src = args->at(i);
2602 assert(!src->is_illegal(), "check");
2603 BasicType t = src->type();
2604 if (is_reference_type(t)) {
2605 intptr_t profiled_k = parameters->type(j);
2606 Local* local = x->state()->local_at(java_index)->as_Local();
2607 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2608 in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2609 profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
          // If the profile is known statically, set it once and for all and do not emit any code.
2611 if (exact != NULL) {
2612 md->set_parameter_type(j, exact);
2613 }
2614 j++;
2615 }
2616 java_index += type2size[t];
2617 }
2618 }
2619 }
2620 }
2621
void LIRGenerator::do_Base(Base* x) {
2623 __ std_entry(LIR_OprFact::illegalOpr);
2624 // Emit moves from physical registers / stack slots to virtual registers
2625 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2626 IRScope* irScope = compilation()->hir()->top_scope();
2627 int java_index = 0;
2628 for (int i = 0; i < args->length(); i++) {
2629 LIR_Opr src = args->at(i);
2630 assert(!src->is_illegal(), "check");
2631 BasicType t = src->type();
2632
    // Types which are smaller than int are passed as int, so
    // correct the type that was passed.
2635 switch (t) {
2636 case T_BYTE:
2637 case T_BOOLEAN:
2638 case T_SHORT:
2639 case T_CHAR:
2640 t = T_INT;
2641 break;
2642 default:
2643 break;
2644 }
2645
2646 LIR_Opr dest = new_register(t);
2647 __ move(src, dest);
2648
2649 // Assign new location to Local instruction for this local
2650 Local* local = x->state()->local_at(java_index)->as_Local();
2651 assert(local != NULL, "Locals for incoming arguments must have been created");
2652 #ifndef __SOFTFP__
2653 // The java calling convention passes double as long and float as int.
2654 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2655 #endif // __SOFTFP__
2656 local->set_operand(dest);
2657 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2658 java_index += type2size[t];
2659 }
2660
2661 if (compilation()->env()->dtrace_method_probes()) {
2662 BasicTypeList signature;
2663 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2664 signature.append(T_METADATA); // Method*
2665 LIR_OprList* args = new LIR_OprList();
2666 args->append(getThreadPointer());
2667 LIR_Opr meth = new_register(T_METADATA);
2668 __ metadata2reg(method()->constant_encoding(), meth);
2669 args->append(meth);
2670 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2671 }
2672
2673 if (method()->is_synchronized()) {
2674 LIR_Opr obj;
2675 if (method()->is_static()) {
2676 obj = new_register(T_OBJECT);
2677 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2678 } else {
2679 Local* receiver = x->state()->local_at(0)->as_Local();
2680 assert(receiver != NULL, "must already exist");
2681 obj = receiver->operand();
2682 }
2683 assert(obj->is_valid(), "must be valid");
2684
2685 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2686 LIR_Opr lock = syncLockOpr();
2687 __ load_stack_address_monitor(0, lock);
2688
2689 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2690 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2691
2692 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2693 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2694 }
2695 }
2696 if (compilation()->age_code()) {
2697 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2698 decrement_age(info);
2699 }
2700 // increment invocation counters if needed
2701 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2702 profile_parameters(x);
2703 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2704 increment_invocation_counter(info);
2705 }
2706
2707 // all blocks with a successor must end with an unconditional jump
2708 // to the successor even if they are consecutive
2709 __ jump(x->default_sux());
2710 }
2711
2712
void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2714 // construct our frame and model the production of incoming pointer
2715 // to the OSR buffer.
2716 __ osr_entry(LIR_Assembler::osrBufferPointer());
2717 LIR_Opr result = rlock_result(x);
2718 __ move(LIR_Assembler::osrBufferPointer(), result);
2719 }
2720
2721
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2723 assert(args->length() == arg_list->length(),
2724 "args=%d, arg_list=%d", args->length(), arg_list->length());
2725 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2726 LIRItem* param = args->at(i);
2727 LIR_Opr loc = arg_list->at(i);
2728 if (loc->is_register()) {
2729 param->load_item_force(loc);
2730 } else {
2731 LIR_Address* addr = loc->as_address_ptr();
2732 param->load_for_store(addr->type());
2733 if (addr->type() == T_OBJECT) {
2734 __ move_wide(param->result(), addr);
2735 } else
2736 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2737 __ unaligned_move(param->result(), addr);
2738 } else {
2739 __ move(param->result(), addr);
2740 }
2741 }
2742 }
2743
2744 if (x->has_receiver()) {
2745 LIRItem* receiver = args->at(0);
2746 LIR_Opr loc = arg_list->at(0);
2747 if (loc->is_register()) {
2748 receiver->load_item_force(loc);
2749 } else {
2750 assert(loc->is_address(), "just checking");
2751 receiver->load_for_store(T_OBJECT);
2752 __ move_wide(receiver->result(), loc->as_address_ptr());
2753 }
2754 }
2755 }
2756
2757
2758 // Visits all arguments, returns appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2760 LIRItemList* argument_items = new LIRItemList();
2761 if (x->has_receiver()) {
2762 LIRItem* receiver = new LIRItem(x->receiver(), this);
2763 argument_items->append(receiver);
2764 }
2765 for (int i = 0; i < x->number_of_arguments(); i++) {
2766 LIRItem* param = new LIRItem(x->argument_at(i), this);
2767 argument_items->append(param);
2768 }
2769 return argument_items;
2770 }
2771
2772
// An invoke with a receiver has the following phases:
2774 // a) traverse and load/lock receiver;
2775 // b) traverse all arguments -> item-array (invoke_visit_argument)
2776 // c) push receiver on stack
2777 // d) load each of the items and push on stack
2778 // e) unlock receiver
2779 // f) move receiver into receiver-register %o0
2780 // g) lock result registers and emit call operation
2781 //
// Before issuing a call, we must spill-save all values on the stack
// that are in caller-save registers. "spill-save" moves those registers
// either into a free callee-save register or spills them if no free
// callee-save register is available.
//
// The problem is where to invoke spill-save.
// - if invoked between e) and f), we may lock a callee-save
//   register in "spill-save" that destroys the receiver register
//   before f) is executed
// - if we rearrange f) to be earlier (by loading %o0) it
//   may destroy a value on the stack that is currently in %o0
//   and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
//   we cannot spill it as it is spill-locked
//
void LIRGenerator::do_Invoke(Invoke* x) {
2798 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2799
2800 LIR_OprList* arg_list = cc->args();
2801 LIRItemList* args = invoke_visit_arguments(x);
2802 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2803
2804 // setup result register
2805 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2806 if (x->type() != voidType) {
2807 result_register = result_register_for(x->type());
2808 }
2809
2810 CodeEmitInfo* info = state_for(x, x->state());
2811
2812 invoke_load_arguments(x, args, arg_list);
2813
2814 if (x->has_receiver()) {
2815 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2816 receiver = args->at(0)->result();
2817 }
2818
2819 // emit invoke code
2820 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2821
2822 // JSR 292
2823 // Preserve the SP over MethodHandle call sites, if needed.
2824 ciMethod* target = x->target();
2825 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2826 target->is_method_handle_intrinsic() ||
2827 target->is_compiled_lambda_form());
2828 if (is_method_handle_invoke) {
2829 info->set_is_method_handle_invoke(true);
2830 if(FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2831 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2832 }
2833 }
2834
2835 switch (x->code()) {
2836 case Bytecodes::_invokestatic:
2837 __ call_static(target, result_register,
2838 SharedRuntime::get_resolve_static_call_stub(),
2839 arg_list, info);
2840 break;
2841 case Bytecodes::_invokespecial:
2842 case Bytecodes::_invokevirtual:
2843 case Bytecodes::_invokeinterface:
2844 // for loaded and final (method or class) target we still produce an inline cache,
2845 // in order to be able to call mixed mode
2846 if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {
2847 __ call_opt_virtual(target, receiver, result_register,
2848 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2849 arg_list, info);
2850 } else if (x->vtable_index() < 0) {
2851 __ call_icvirtual(target, receiver, result_register,
2852 SharedRuntime::get_resolve_virtual_call_stub(),
2853 arg_list, info);
2854 } else {
2855 int entry_offset = in_bytes(Klass::vtable_start_offset()) + x->vtable_index() * vtableEntry::size_in_bytes();
2856 int vtable_offset = entry_offset + vtableEntry::method_offset_in_bytes();
2857 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2858 }
2859 break;
2860 case Bytecodes::_invokedynamic: {
2861 __ call_dynamic(target, receiver, result_register,
2862 SharedRuntime::get_resolve_static_call_stub(),
2863 arg_list, info);
2864 break;
2865 }
2866 default:
2867 fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
2868 break;
2869 }
2870
2871 // JSR 292
2872 // Restore the SP after MethodHandle call sites, if needed.
2873 if (is_method_handle_invoke
2874 && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2875 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2876 }
2877
2878 if (x->type()->is_float() || x->type()->is_double()) {
2879 // Force rounding of results from non-strictfp when in strictfp
2880 // scope (or when we don't know the strictness of the callee, to
2881 // be safe.)
2882 if (method()->is_strict()) {
2883 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2884 result_register = round_item(result_register);
2885 }
2886 }
2887 }
2888
2889 if (result_register->is_valid()) {
2890 LIR_Opr result = rlock_result(x);
2891 __ move(result_register, result);
2892 }
2893 }
2894
2895
void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2897 assert(x->number_of_arguments() == 1, "wrong type");
2898 LIRItem value (x->argument_at(0), this);
2899 LIR_Opr reg = rlock_result(x);
2900 value.load_item();
2901 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2902 __ move(tmp, reg);
2903 }
2904
2905
2906
// Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
void LIRGenerator::do_IfOp(IfOp* x) {
2909 #ifdef ASSERT
2910 {
2911 ValueTag xtag = x->x()->type()->tag();
2912 ValueTag ttag = x->tval()->type()->tag();
2913 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2914 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2915 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2916 }
2917 #endif
2918
2919 LIRItem left(x->x(), this);
2920 LIRItem right(x->y(), this);
2921 left.load_item();
2922 if (can_inline_as_constant(right.value())) {
2923 right.dont_load_item();
2924 } else {
2925 right.load_item();
2926 }
2927
2928 LIRItem t_val(x->tval(), this);
2929 LIRItem f_val(x->fval(), this);
2930 t_val.dont_load_item();
2931 f_val.dont_load_item();
2932 LIR_Opr reg = rlock_result(x);
2933
2934 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2935 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2936 }
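// Illustrative sketch of the two LIR ops emitted above for, e.g.,
// "(a < b) ? t : f" (operand names hypothetical):
//
//   cmp   less, a, b
//   cmove less, t, f, result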
2937
2938 #ifdef JFR_HAVE_INTRINSICS
void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
2940 CodeEmitInfo* info = state_for(x);
2941 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
2942
2943 assert(info != NULL, "must have info");
2944 LIRItem arg(x->argument_at(0), this);
2945
2946 arg.load_item();
2947 LIR_Opr klass = new_register(T_METADATA);
2948 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
2949 LIR_Opr id = new_register(T_LONG);
2950 ByteSize offset = KLASS_TRACE_ID_OFFSET;
2951 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
2952
2953 __ move(trace_id_addr, id);
2954 __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
2955 __ store(id, trace_id_addr);
2956
2957 #ifdef TRACE_ID_META_BITS
2958 __ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id);
2959 #endif
2960 #ifdef TRACE_ID_SHIFT
2961 __ unsigned_shift_right(id, TRACE_ID_SHIFT, id);
2962 #endif
2963
2964 __ move(id, rlock_result(x));
2965 }
2966
void LIRGenerator::do_getEventWriter(Intrinsic* x) {
2968 LabelObj* L_end = new LabelObj();
2969
2970 LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),
2971 in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR),
2972 T_OBJECT);
2973 LIR_Opr result = rlock_result(x);
2974 __ move_wide(jobj_addr, result);
2975 __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
2976 __ branch(lir_cond_equal, T_OBJECT, L_end->label());
2977
2978 LIR_Opr jobj = new_register(T_OBJECT);
2979 __ move(result, jobj);
2980 access_load(IN_NATIVE, T_OBJECT, LIR_OprFact::address(new LIR_Address(jobj, T_OBJECT)), result);
2981
2982 __ branch_destination(L_end->label());
2983 }
2984
2985 #endif
2986
2987
void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {
2989 assert(x->number_of_arguments() == 0, "wrong type");
2990 // Enforce computation of _reserved_argument_area_size which is required on some platforms.
2991 BasicTypeList signature;
2992 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2993 LIR_Opr reg = result_register_for(x->type());
2994 __ call_runtime_leaf(routine, getThreadTemp(),
2995 reg, new LIR_OprList());
2996 LIR_Opr result = rlock_result(x);
2997 __ move(reg, result);
2998 }
2999
3000
3001
void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_intBitsToFloat :
  case vmIntrinsics::_doubleToRawLongBits :
  case vmIntrinsics::_longBitsToDouble :
  case vmIntrinsics::_floatToRawIntBits : {
    do_FPIntrinsics(x);
    break;
  }

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_getClassId:
    do_ClassIDIntrinsic(x);
    break;
  case vmIntrinsics::_getEventWriter:
    do_getEventWriter(x);
    break;
  case vmIntrinsics::_counterTime:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), x);
    break;
#endif

  case vmIntrinsics::_currentTimeMillis:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x);
    break;

  case vmIntrinsics::_nanoTime:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x);
    break;

  case vmIntrinsics::_Object_init:   do_RegisterFinalizer(x); break;
  case vmIntrinsics::_isInstance:    do_isInstance(x);        break;
  case vmIntrinsics::_isPrimitive:   do_isPrimitive(x);       break;
  case vmIntrinsics::_getClass:      do_getClass(x);          break;
  case vmIntrinsics::_currentThread: do_currentThread(x);     break;

  case vmIntrinsics::_dlog:   // fall through
  case vmIntrinsics::_dlog10: // fall through
  case vmIntrinsics::_dabs:   // fall through
  case vmIntrinsics::_dsqrt:  // fall through
  case vmIntrinsics::_dtan:   // fall through
  case vmIntrinsics::_dsin:   // fall through
  case vmIntrinsics::_dcos:   // fall through
  case vmIntrinsics::_dexp:   // fall through
  case vmIntrinsics::_dpow:   do_MathIntrinsic(x); break;
  case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;

  case vmIntrinsics::_fmaD: do_FmaIntrinsic(x); break;
  case vmIntrinsics::_fmaF: do_FmaIntrinsic(x); break;

  // java.nio.Buffer.checkIndex
  case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;

  case vmIntrinsics::_compareAndSetReference:
    do_CompareAndSwap(x, objectType);
    break;
  case vmIntrinsics::_compareAndSetInt:
    do_CompareAndSwap(x, intType);
    break;
  case vmIntrinsics::_compareAndSetLong:
    do_CompareAndSwap(x, longType);
    break;

  case vmIntrinsics::_loadFence :
    __ membar_acquire();
    break;
  case vmIntrinsics::_storeFence:
    __ membar_release();
    break;
  case vmIntrinsics::_fullFence :
    __ membar();
    break;
  case vmIntrinsics::_onSpinWait:
    __ on_spin_wait();
    break;
  case vmIntrinsics::_Reference_get:
    do_Reference_get(x);
    break;

  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
    do_update_CRC32(x);
    break;

  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    do_update_CRC32C(x);
    break;

  case vmIntrinsics::_vectorizedMismatch:
    do_vectorizedMismatch(x);
    break;

  default: ShouldNotReachHere(); break;
  }
}

void LIRGenerator::profile_arguments(ProfileCall* x) {
  if (compilation()->profile_arguments()) {
    int bci = x->bci_of_invoke();
    ciMethodData* md = x->method()->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(bci);
    if (data != NULL) {
      if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
          (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
        ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
        int base_offset = md->byte_offset_of_slot(data, extra);
        LIR_Opr mdp = LIR_OprFact::illegalOpr;
        ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();

        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        int start = 0;
        int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
        if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
          // first argument is not profiled at call (method handle invoke)
          assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
          start = 1;
        }
        ciSignature* callee_signature = x->callee()->signature();
        // method handle call to virtual method
        bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
        ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);

        bool ignored_will_link;
        ciSignature* signature_at_call = NULL;
        x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
        ciSignatureStream signature_at_call_stream(signature_at_call);

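        // profile_type() returns a non-NULL klass only when the argument
        // type is statically known; in that case the MDO slot is updated
        // directly at compile time and no profiling code is emitted.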
        // if called through method handle invoke, some arguments may have been popped
        for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
          int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
          ciKlass* exact = profile_type(md, base_offset, off,
                                        args->type(i), x->profiled_arg_at(i+start), mdp,
                                        !x->arg_needs_null_check(i+start),
                                        signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
          if (exact != NULL) {
            md->set_argument_type(bci, i, exact);
          }
        }
      } else {
#ifdef ASSERT
        Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
        int n = x->nb_profiled_args();
        assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
               (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
               "only at JSR292 bytecodes");
#endif
      }
    }
  }
}

// profile parameters on entry to an inlined method
void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
  if (compilation()->profile_parameters() && x->inlined()) {
    ciMethodData* md = x->callee()->method_data_or_null();
    if (md != NULL) {
      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
      if (parameters_type_data != NULL) {
        ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
        LIR_Opr mdp = LIR_OprFact::illegalOpr;
        bool has_receiver = !x->callee()->is_static();
        ciSignature* sig = x->callee()->signature();
        ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
        int i = 0; // to iterate on the Instructions
        Value arg = x->recv();
        bool not_null = false;
        int bci = x->bci_of_invoke();
        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        // The first parameter is the receiver, so that's what we start
        // with if it exists. One exception is a method handle call to a
        // virtual method: the receiver is in the args list.
        if (arg == NULL || !Bytecodes::has_receiver(bc)) {
          i = 1;
          arg = x->profiled_arg_at(0);
          not_null = !x->arg_needs_null_check(0);
        }
        int k = 0; // to iterate on the profile data
        for (;;) {
          intptr_t profiled_k = parameters->type(k);
          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
                                        in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
                                        profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
          // If the profile is known statically, set it once for all and do not emit any code.
          if (exact != NULL) {
            md->set_parameter_type(k, exact);
          }
          k++;
          if (k >= parameters_type_data->number_of_parameters()) {
#ifdef ASSERT
            int extra = 0;
            if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
                x->nb_profiled_args() >= TypeProfileParmsLimit &&
                x->recv() != NULL && Bytecodes::has_receiver(bc)) {
              extra += 1;
            }
            assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
#endif
            break;
          }
          arg = x->profiled_arg_at(i);
          not_null = !x->arg_needs_null_check(i);
          i++;
        }
      }
    }
  }
}

void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  // Need recv in a temporary register so it interferes with the other temporaries
  LIR_Opr recv = LIR_OprFact::illegalOpr;
  LIR_Opr mdo = new_register(T_METADATA);
  // tmp is used to hold the counters on SPARC
  LIR_Opr tmp = new_pointer_register();

  if (x->nb_profiled_args() > 0) {
    profile_arguments(x);
  }

  // profile parameters on inlined method entry including receiver
  if (x->recv() != NULL || x->nb_profiled_args() > 0) {
    profile_parameters_at_call(x);
  }

  if (x->recv() != NULL) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}

void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  int bci = x->bci_of_invoke();
  ciMethodData* md = x->method()->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  if (data != NULL) {
    assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
    ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
    LIR_Opr mdp = LIR_OprFact::illegalOpr;

    bool ignored_will_link;
    ciSignature* signature_at_call = NULL;
    x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);

    // The offset within the MDO of the entry to update may be too large
    // to be used in load/store instructions on some platforms. So have
    // profile_type() compute the address of the profile in a register.
    ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
                                  ret->type(), x->ret(), mdp,
                                  !x->needs_null_check(),
                                  signature_at_call->return_type()->as_klass(),
                                  x->callee()->signature()->return_type()->as_klass());
    if (exact != NULL) {
      md->set_return_type(bci, exact);
    }
  }
}

void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  // We can safely ignore accessors here, since c2 will inline them anyway;
  // accessors are also always mature.
  if (!x->inlinee()->is_accessor()) {
    CodeEmitInfo* info = state_for(x, x->state(), true);
    // Notify the runtime very infrequently only to take care of counter overflows
    int freq_log = Tier23InlineeNotifyFreqLog;
    double scale;
    if (_method->has_option_value("CompileThresholdScaling", scale)) {
      freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
    }
    increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
  }
}

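// Conditionally increment the backedge counter for an If: the cmove selects
// a step of count_increment for whichever successor is a backward branch
// (target bci smaller than the bci of the If) and zero for the other, so
// only taken backedges bump the counter.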
void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
  if (compilation()->count_backedges()) {
#if defined(X86) && !defined(_LP64)
    // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy.
    LIR_Opr left_copy = new_register(left->type());
    __ move(left, left_copy);
    __ cmp(cond, left_copy, right);
#else
    __ cmp(cond, left, right);
#endif
    LIR_Opr step = new_register(T_INT);
    LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
    LIR_Opr zero = LIR_OprFact::intConst(0);
    __ cmove(cond,
             (left_bci < bci) ? plus_one : zero,
             (right_bci < bci) ? plus_one : zero,
             step, left->type());
    increment_backedge_counter(info, step, bci);
  }
}

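// Increment the invocation/backedge event counter, picking the notification
// frequency from the compilation level: the Tier2 flags apply when compiling
// at the limited-profile level, the Tier3 flags at the full-profile level.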
void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
  int freq_log = 0;
  int level = compilation()->env()->comp_level();
  if (level == CompLevel_limited_profile) {
    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
  } else if (level == CompLevel_full_profile) {
    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
  } else {
    ShouldNotReachHere();
  }
  // Increment the appropriate invocation/backedge counter and notify the runtime.
  double scale;
  if (_method->has_option_value("CompileThresholdScaling", scale)) {
    freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
  }
  increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
}

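// Decrement the nmethod age counter in the MethodCounters and deoptimize
// (making the nmethod not entrant) once it drops to zero, so that rarely
// used code can be reclaimed.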
void LIRGenerator::decrement_age(CodeEmitInfo* info) {
  ciMethod* method = info->scope()->method();
  MethodCounters* mc_adr = method->ensure_method_counters();
  if (mc_adr != NULL) {
    LIR_Opr mc = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(mc_adr), mc);
    int offset = in_bytes(MethodCounters::nmethod_age_offset());
    LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
    LIR_Opr result = new_register(T_INT);
    __ load(counter, result);
    __ sub(result, LIR_OprFact::intConst(1), result);
    __ store(result, counter);
    // DeoptimizeStub will reexecute from the current state in code info.
    CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
                                         Deoptimization::Action_make_not_entrant);
    __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
    __ branch(lir_cond_lessEqual, T_INT, deopt);
  }
}


void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
                                                ciMethod *method, LIR_Opr step, int frequency,
                                                int bci, bool backedge, bool notify) {
  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0");
  int level = _compilation->env()->comp_level();
  assert(level > CompLevel_simple, "Shouldn't be here");

  int offset = -1;
  LIR_Opr counter_holder = NULL;
  if (level == CompLevel_limited_profile) {
    MethodCounters* counters_adr = method->ensure_method_counters();
    if (counters_adr == NULL) {
      bailout("method counters allocation failed");
      return;
    }
    counter_holder = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
    offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
                                 MethodCounters::invocation_counter_offset());
  } else if (level == CompLevel_full_profile) {
    counter_holder = new_register(T_METADATA);
    offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
                                 MethodData::invocation_counter_offset());
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    __ metadata2reg(md->constant_encoding(), counter_holder);
  } else {
    ShouldNotReachHere();
  }
  LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, step, result);
  __ store(result, counter);
  if (notify && (!backedge || UseOnStackReplacement)) {
    LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
    // The bci for info can point at the cmp of an if; we want the bci of the if itself.
    CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
    int freq = frequency << InvocationCounter::count_shift;
    if (freq == 0) {
      if (!step->is_constant()) {
        __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
        __ branch(lir_cond_notEqual, T_ILLEGAL, overflow);
      } else {
        __ branch(lir_cond_always, T_ILLEGAL, overflow);
      }
    } else {
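      // frequency is 2^freq_log - 1, so after shifting it by count_shift the
      // masked counter value is zero exactly once every 2^freq_log
      // increments; that is when the runtime gets notified.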
      LIR_Opr mask = load_immediate(freq, T_INT);
      if (!step->is_constant()) {
        // If step is 0, make sure the overflow check below always fails
        __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
        __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
      }
      __ logical_and(result, mask, result);
      __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, overflow);
    }
    __ branch_destination(overflow->continuation());
  }
}

void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
  LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
  BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());

  if (x->pass_thread()) {
    signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    args->append(getThreadPointer());
  }

  for (int i = 0; i < x->number_of_arguments(); i++) {
    Value a = x->argument_at(i);
    LIRItem* item = new LIRItem(a, this);
    item->load_item();
    args->append(item->result());
    signature->append(as_BasicType(a->type()));
  }

  LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
  if (x->type() == voidType) {
    set_no_result(x);
  } else {
    __ move(result, rlock_result(x));
  }
}

#ifdef ASSERT
void LIRGenerator::do_Assert(Assert *x) {
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  assert(tag == intTag, "Only integer assertions are valid!");

  xin->load_item();
  yin->dont_load_item();

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
}
#endif

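// Emit a predicate created by range check elimination: if the condition
// holds, the speculation failed and we deoptimize through a
// PredicateFailedStub. A predicate without operands (or one forced by
// StressRangeCheckElimination) deoptimizes unconditionally, and constant
// operands are folded at compile time.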
void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
  Instruction *a = x->x();
  Instruction *b = x->y();
  if (!a || StressRangeCheckElimination) {
    assert(!b || StressRangeCheckElimination, "B must also be null");

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    __ jump(stub);
  } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
    int a_int = a->type()->as_IntConstant()->value();
    int b_int = b->type()->as_IntConstant()->value();

    bool ok = false;

    switch(x->cond()) {
    case Instruction::eql: ok = (a_int == b_int); break;
    case Instruction::neq: ok = (a_int != b_int); break;
    case Instruction::lss: ok = (a_int < b_int); break;
    case Instruction::leq: ok = (a_int <= b_int); break;
    case Instruction::gtr: ok = (a_int > b_int); break;
    case Instruction::geq: ok = (a_int >= b_int); break;
    case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
    case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
    default: ShouldNotReachHere();
    }

    if (ok) {
      CodeEmitInfo *info = state_for(x, x->state());
      CodeStub* stub = new PredicateFailedStub(info);

      __ jump(stub);
    }
  } else {
    ValueTag tag = x->x()->type()->tag();
    If::Condition cond = x->cond();
    LIRItem xitem(x->x(), this);
    LIRItem yitem(x->y(), this);
    LIRItem* xin = &xitem;
    LIRItem* yin = &yitem;

    assert(tag == intTag, "Only integer deoptimizations are valid!");

    xin->load_item();
    yin->dont_load_item();
    set_no_result(x);

    LIR_Opr left = xin->result();
    LIR_Opr right = yin->result();

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    __ cmp(lir_cond(cond), left, right);
    __ branch(lir_cond(cond), right->type(), stub);
  }
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(1);
  LIRItem value(arg1, this);
  args.append(&value);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(2);
  LIRItem value1(arg1, this);
  LIRItem value2(arg2, this);
  args.append(&value1);
  args.append(&value2);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));
  signature.append(as_BasicType(arg2->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}

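// Low-level runtime call with the argument values already in LIR operands:
// each argument is moved to the register or stack slot dictated by the C
// calling convention before the call is emitted.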
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
//      if (!can_store_as_constant(arg)) {
//        LIR_Opr tmp = new_register(arg->type());
//        __ move(arg, tmp);
//        arg = tmp;
//      }
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}

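// Same as above, but taking LIRItems: register arguments are force-loaded
// into their calling-convention registers and stack arguments are stored
// into the outgoing argument area.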
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);

  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      arg->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}

void LIRGenerator::do_MemBar(MemBar* x) {
  LIR_Code code = x->code();
  switch(code) {
  case lir_membar_acquire   : __ membar_acquire(); break;
  case lir_membar_release   : __ membar_release(); break;
  case lir_membar           : __ membar(); break;
  case lir_membar_loadload  : __ membar_loadload(); break;
  case lir_membar_storestore: __ membar_storestore(); break;
  case lir_membar_loadstore : __ membar_loadstore(); break;
  case lir_membar_storeload : __ membar_storeload(); break;
  default                   : ShouldNotReachHere(); break;
  }
}

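// The JVMS requires a value stored into a boolean array to be masked to
// 0 or 1. byte[] and boolean[] share the same layout apart from one bit in
// the Klass layout helper, so test that bit at runtime and use the masked
// value only when the array really is a boolean[].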
LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  LIR_Opr value_fixed = rlock_byte(T_BYTE);
  if (TwoOperandLIRForm) {
    __ move(value, value_fixed);
    __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
  } else {
    __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
  }
  LIR_Opr klass = new_register(T_METADATA);
  __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
  null_check_info = NULL;
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
  __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
  __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
  value = value_fixed;
  return value;
}

LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  if (x->check_boolean()) {
    value = mask_boolean(array, value, null_check_info);
  }
  return value;
}