1 /*
2  * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #include "precompiled.hpp"
26 #include "c1/c1_CFGPrinter.hpp"
27 #include "c1/c1_CodeStubs.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_IR.hpp"
31 #include "c1/c1_LIRGenerator.hpp"
32 #include "c1/c1_LinearScan.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "code/vmreg.inline.hpp"
35 #include "runtime/timerTrace.hpp"
36 #include "utilities/bitMap.inline.hpp"
37 
38 #ifndef PRODUCT
39 
40   static LinearScanStatistic _stat_before_alloc;
41   static LinearScanStatistic _stat_after_asign;
42   static LinearScanStatistic _stat_final;
43 
44   static LinearScanTimers _total_timer;
45 
46   // helper macro for short definition of timer
47   #define TIME_LINEAR_SCAN(timer_name)  TraceTime _block_timer("", _total_timer.timer(LinearScanTimers::timer_name), TimeLinearScan || TimeEachLinearScan, Verbose);
48 
49   // helper macro for short definition of trace-output inside code
50   #define TRACE_LINEAR_SCAN(level, code)       \
51     if (TraceLinearScanLevel >= level) {       \
52       code;                                    \
53     }
54 
55 #else
56 
57   #define TIME_LINEAR_SCAN(timer_name)
58   #define TRACE_LINEAR_SCAN(level, code)
59 
60 #endif
61 
62 // Map BasicType to spill size in 32-bit words, matching VMReg's notion of words
63 #ifdef _LP64
64 static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 2,  1, 2, 1, -1};
65 #else
66 static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1, 1, 1, -1};
67 #endif
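// The table is indexed by BasicType: an entry of 2 marks types that need a
// double-word spill slot (assign_spill_slot() below passes
// type2spill_size[it->type()] == 2 to allocate_spill_slot()), while an entry of -1
// marks slots that should never be spilled (e.g. the trailing T_CONFLICT entry).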
68 
69 
70 // Implementation of LinearScan
71 
72 LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
73  : _compilation(ir->compilation())
74  , _ir(ir)
75  , _gen(gen)
76  , _frame_map(frame_map)
77  , _num_virtual_regs(gen->max_virtual_register_number())
78  , _has_fpu_registers(false)
79  , _num_calls(-1)
80  , _max_spills(0)
81  , _unused_spill_slot(-1)
82  , _intervals(0)   // initialized later with correct length
83  , _new_intervals_from_allocation(NULL)
84  , _sorted_intervals(NULL)
85  , _needs_full_resort(false)
86  , _lir_ops(0)     // initialized later with correct length
87  , _block_of_op(0) // initialized later with correct length
88  , _has_info(0)
89  , _has_call(0)
90  , _scope_value_cache(0) // initialized later with correct length
91  , _interval_in_loop(0)  // initialized later with correct length
92  , _cached_blocks(*ir->linear_scan_order())
93 #ifdef X86
94  , _fpu_stack_allocator(NULL)
95 #endif
96 {
97   assert(this->ir() != NULL,          "check if valid");
98   assert(this->compilation() != NULL, "check if valid");
99   assert(this->gen() != NULL,         "check if valid");
100   assert(this->frame_map() != NULL,   "check if valid");
101 }
102 
103 
104 // ********** functions for converting LIR-Operands to register numbers
105 //
106 // Emulate a flat register file comprising physical integer registers,
107 // physical floating-point registers and virtual registers, in that order.
108 // Virtual registers already have appropriate numbers, since V0 is
109 // the number of physical registers.
110 // Returns -1 for hi word if opr is a single word operand.
111 //
112 // Note: the inverse operation (calculating an operand for register numbers)
113 //       is done in calc_operand_for_interval()
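//
// Illustrative layout of this flat register number space:
//   [0, nof_cpu_regs)        physical integer (cpu) registers
//   [nof_cpu_regs, nof_regs) physical fpu/xmm registers
//   [nof_regs, ...)          virtual registers (LIR_OprDesc::vreg_base and above)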
114 
115 int LinearScan::reg_num(LIR_Opr opr) {
116   assert(opr->is_register(), "should not call this otherwise");
117 
118   if (opr->is_virtual_register()) {
119     assert(opr->vreg_number() >= nof_regs, "found a virtual register with a fixed-register number");
120     return opr->vreg_number();
121   } else if (opr->is_single_cpu()) {
122     return opr->cpu_regnr();
123   } else if (opr->is_double_cpu()) {
124     return opr->cpu_regnrLo();
125 #ifdef X86
126   } else if (opr->is_single_xmm()) {
127     return opr->fpu_regnr() + pd_first_xmm_reg;
128   } else if (opr->is_double_xmm()) {
129     return opr->fpu_regnrLo() + pd_first_xmm_reg;
130 #endif
131   } else if (opr->is_single_fpu()) {
132     return opr->fpu_regnr() + pd_first_fpu_reg;
133   } else if (opr->is_double_fpu()) {
134     return opr->fpu_regnrLo() + pd_first_fpu_reg;
135   } else {
136     ShouldNotReachHere();
137     return -1;
138   }
139 }
140 
141 int LinearScan::reg_numHi(LIR_Opr opr) {
142   assert(opr->is_register(), "should not call this otherwise");
143 
144   if (opr->is_virtual_register()) {
145     return -1;
146   } else if (opr->is_single_cpu()) {
147     return -1;
148   } else if (opr->is_double_cpu()) {
149     return opr->cpu_regnrHi();
150 #ifdef X86
151   } else if (opr->is_single_xmm()) {
152     return -1;
153   } else if (opr->is_double_xmm()) {
154     return -1;
155 #endif
156   } else if (opr->is_single_fpu()) {
157     return -1;
158   } else if (opr->is_double_fpu()) {
159     return opr->fpu_regnrHi() + pd_first_fpu_reg;
160   } else {
161     ShouldNotReachHere();
162     return -1;
163   }
164 }
165 
166 
167 // ********** functions for classification of intervals
168 
169 bool LinearScan::is_precolored_interval(const Interval* i) {
170   return i->reg_num() < LinearScan::nof_regs;
171 }
172 
173 bool LinearScan::is_virtual_interval(const Interval* i) {
174   return i->reg_num() >= LIR_OprDesc::vreg_base;
175 }
176 
177 bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
178   return i->reg_num() < LinearScan::nof_cpu_regs;
179 }
180 
181 bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
182 #if defined(__SOFTFP__) || defined(E500V2)
183   return i->reg_num() >= LIR_OprDesc::vreg_base;
184 #else
185   return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() != T_FLOAT && i->type() != T_DOUBLE);
186 #endif // __SOFTFP__ or E500V2
187 }
188 
189 bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
190   return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
191 }
192 
193 bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
194 #if defined(__SOFTFP__) || defined(E500V2)
195   return false;
196 #else
197   return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE);
198 #endif // __SOFTFP__ or E500V2
199 }
200 
201 bool LinearScan::is_in_fpu_register(const Interval* i) {
202   // fixed intervals not needed for FPU stack allocation
203   return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
204 }
205 
206 bool LinearScan::is_oop_interval(const Interval* i) {
207   // fixed intervals never contain oops
208   return i->reg_num() >= nof_regs && i->type() == T_OBJECT;
209 }
210 
211 
212 // ********** General helper functions
213 
214 // compute next unused stack index that can be used for spilling
215 int LinearScan::allocate_spill_slot(bool double_word) {
216   int spill_slot;
217   if (double_word) {
218     if ((_max_spills & 1) == 1) {
219       // alignment of double-word values
220       // the hole created by the alignment is filled with the next single-word value
221       assert(_unused_spill_slot == -1, "wasting a spill slot");
222       _unused_spill_slot = _max_spills;
223       _max_spills++;
224     }
225     spill_slot = _max_spills;
226     _max_spills += 2;
227 
228   } else if (_unused_spill_slot != -1) {
229     // re-use hole that was the result of a previous double-word alignment
230     spill_slot = _unused_spill_slot;
231     _unused_spill_slot = -1;
232 
233   } else {
234     spill_slot = _max_spills;
235     _max_spills++;
236   }
237 
238   int result = spill_slot + LinearScan::nof_regs + frame_map()->argcount();
239 
240   // the class OopMapValue uses only 11 bits for storing the name of the
241   // oop location. So a stack slot bigger than 2^11 leads to an overflow
242   // that is not reported in product builds. Prevent this by checking the
243   // spill slot here (although this value and the location name used later
244   // are slightly different)
245   if (result > 2000) {
246     bailout("too many stack slots used");
247   }
248 
249   return result;
250 }
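
// Illustrative call sequence for allocate_spill_slot (slot indices shown before the
// nof_regs + argcount() offset is added):
//   allocate_spill_slot(false) -> slot 0             (_max_spills = 1)
//   allocate_spill_slot(true)  -> slot 2, hole at 1  (_max_spills = 4, _unused_spill_slot = 1)
//   allocate_spill_slot(false) -> slot 1             (alignment hole re-used, _unused_spill_slot = -1)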
251 
252 void LinearScan::assign_spill_slot(Interval* it) {
253   // assign the canonical spill slot of the parent (if a part of the interval
254   // is already spilled) or allocate a new spill slot
255   if (it->canonical_spill_slot() >= 0) {
256     it->assign_reg(it->canonical_spill_slot());
257   } else {
258     int spill = allocate_spill_slot(type2spill_size[it->type()] == 2);
259     it->set_canonical_spill_slot(spill);
260     it->assign_reg(spill);
261   }
262 }
263 
264 void LinearScan::propagate_spill_slots() {
265   if (!frame_map()->finalize_frame(max_spills())) {
266     bailout("frame too large");
267   }
268 }
269 
270 // create a new interval with a predefined reg_num
271 // (only used for parent intervals that are created during the building phase)
272 Interval* LinearScan::create_interval(int reg_num) {
273   assert(_intervals.at(reg_num) == NULL, "overwriting existing interval");
274 
275   Interval* interval = new Interval(reg_num);
276   _intervals.at_put(reg_num, interval);
277 
278   // assign register number for precolored intervals
279   if (reg_num < LIR_OprDesc::vreg_base) {
280     interval->assign_reg(reg_num);
281   }
282   return interval;
283 }
284 
285 // assign a new reg_num to the interval and append it to the list of intervals
286 // (only used for child intervals that are created during register allocation)
287 void LinearScan::append_interval(Interval* it) {
288   it->set_reg_num(_intervals.length());
289   _intervals.append(it);
290   IntervalList* new_intervals = _new_intervals_from_allocation;
291   if (new_intervals == NULL) {
292     new_intervals = _new_intervals_from_allocation = new IntervalList();
293   }
294   new_intervals->append(it);
295 }
296 
297 // copy the vreg-flags if an interval is split
298 void LinearScan::copy_register_flags(Interval* from, Interval* to) {
299   if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::byte_reg)) {
300     gen()->set_vreg_flag(to->reg_num(), LIRGenerator::byte_reg);
301   }
302   if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::callee_saved)) {
303     gen()->set_vreg_flag(to->reg_num(), LIRGenerator::callee_saved);
304   }
305 
306   // Note: do not copy the must_start_in_memory flag because it is not necessary for child
307   //       intervals (only the very beginning of the interval must be in memory)
308 }
309 
310 
311 // ********** spill move optimization
312 // eliminate moves from register to stack if stack slot is known to be correct
313 
314 // called during building of intervals
315 void LinearScan::change_spill_definition_pos(Interval* interval, int def_pos) {
316   assert(interval->is_split_parent(), "can only be called for split parents");
317 
318   switch (interval->spill_state()) {
319     case noDefinitionFound:
320       assert(interval->spill_definition_pos() == -1, "must not be set before");
321       interval->set_spill_definition_pos(def_pos);
322       interval->set_spill_state(oneDefinitionFound);
323       break;
324 
325     case oneDefinitionFound:
326       assert(def_pos <= interval->spill_definition_pos(), "positions are processed in reverse order when intervals are created");
327       if (def_pos < interval->spill_definition_pos() - 2) {
328         // second definition found, so no spill optimization possible for this interval
329         interval->set_spill_state(noOptimization);
330       } else {
331         // two consecutive definitions (because of two-operand LIR form)
332         assert(block_of_op_with_id(def_pos) == block_of_op_with_id(interval->spill_definition_pos()), "block must be equal");
333       }
334       break;
335 
336     case noOptimization:
337       // nothing to do
338       break;
339 
340     default:
341       assert(false, "other states not allowed at this time");
342   }
343 }
344 
345 // called during register allocation
346 void LinearScan::change_spill_state(Interval* interval, int spill_pos) {
347   switch (interval->spill_state()) {
348     case oneDefinitionFound: {
349       int def_loop_depth = block_of_op_with_id(interval->spill_definition_pos())->loop_depth();
350       int spill_loop_depth = block_of_op_with_id(spill_pos)->loop_depth();
351 
352       if (def_loop_depth < spill_loop_depth) {
353         // the loop depth of the spilling position is higher than the loop depth
354         // at the definition of the interval -> move write to memory out of loop
355         // by storing at the definition of the interval
356         interval->set_spill_state(storeAtDefinition);
357       } else {
358         // the interval is currently spilled only once, so for now there is no
359         // reason to store the interval at the definition
360         interval->set_spill_state(oneMoveInserted);
361       }
362       break;
363     }
364 
365     case oneMoveInserted: {
366       // the interval is spilled more than once, so it is better to store it to
367       // memory at the definition
368       interval->set_spill_state(storeAtDefinition);
369       break;
370     }
371 
372     case storeAtDefinition:
373     case startInMemory:
374     case noOptimization:
375     case noDefinitionFound:
376       // nothing to do
377       break;
378 
379     default:
380       assert(false, "other states not allowed at this time");
381   }
382 }
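
// Rough summary of the spill-state transitions driven by the two functions above:
//   noDefinitionFound  --(definition seen)--------------> oneDefinitionFound
//   oneDefinitionFound --(second, separate definition)--> noOptimization
//   oneDefinitionFound --(spilled inside a deeper loop)-> storeAtDefinition
//   oneDefinitionFound --(first spill at same depth)----> oneMoveInserted
//   oneMoveInserted    --(spilled again)-----------------> storeAtDefinition
// Intervals ending up in storeAtDefinition are handled by eliminate_spill_moves() below.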
383 
384 
385 bool LinearScan::must_store_at_definition(const Interval* i) {
386   return i->is_split_parent() && i->spill_state() == storeAtDefinition;
387 }
388 
389 // called once before assignment of register numbers
390 void LinearScan::eliminate_spill_moves() {
391   TIME_LINEAR_SCAN(timer_eliminate_spill_moves);
392   TRACE_LINEAR_SCAN(3, tty->print_cr("***** Eliminating unnecessary spill moves"));
393 
394   // collect all intervals that must be stored after their definition.
395   // the list is sorted by Interval::spill_definition_pos
396   Interval* interval;
397   Interval* temp_list;
398   create_unhandled_lists(&interval, &temp_list, must_store_at_definition, NULL);
399 
400 #ifdef ASSERT
401   Interval* prev = NULL;
402   Interval* temp = interval;
403   while (temp != Interval::end()) {
404     assert(temp->spill_definition_pos() > 0, "invalid spill definition pos");
405     if (prev != NULL) {
406       assert(temp->from() >= prev->from(), "intervals not sorted");
407       assert(temp->spill_definition_pos() >= prev->spill_definition_pos(), "when intervals are sorted by from, then they must also be sorted by spill_definition_pos");
408     }
409 
410     assert(temp->canonical_spill_slot() >= LinearScan::nof_regs, "interval has no spill slot assigned");
411     assert(temp->spill_definition_pos() >= temp->from(), "invalid order");
412     assert(temp->spill_definition_pos() <= temp->from() + 2, "only intervals defined once at their start-pos can be optimized");
413 
414     TRACE_LINEAR_SCAN(4, tty->print_cr("interval %d (from %d to %d) must be stored at %d", temp->reg_num(), temp->from(), temp->to(), temp->spill_definition_pos()));
415 
416     temp = temp->next();
417   }
418 #endif
419 
420   LIR_InsertionBuffer insertion_buffer;
421   int num_blocks = block_count();
422   for (int i = 0; i < num_blocks; i++) {
423     BlockBegin* block = block_at(i);
424     LIR_OpList* instructions = block->lir()->instructions_list();
425     int         num_inst = instructions->length();
426     bool        has_new = false;
427 
428     // iterate all instructions of the block. skip the first because it is always a label
429     for (int j = 1; j < num_inst; j++) {
430       LIR_Op* op = instructions->at(j);
431       int op_id = op->id();
432 
433       if (op_id == -1) {
434         // remove move from register to stack if the stack slot is guaranteed to be correct.
435         // only moves that have been inserted by LinearScan can be removed.
436         assert(op->code() == lir_move, "only moves can have a op_id of -1");
437         assert(op->as_Op1() != NULL, "move must be LIR_Op1");
438         assert(op->as_Op1()->result_opr()->is_virtual(), "LinearScan inserts only moves to virtual registers");
439 
440         LIR_Op1* op1 = (LIR_Op1*)op;
441         Interval* interval = interval_at(op1->result_opr()->vreg_number());
442 
443         if (interval->assigned_reg() >= LinearScan::nof_regs && interval->always_in_memory()) {
444           // move target is a stack slot that is always correct, so eliminate instruction
445           TRACE_LINEAR_SCAN(4, tty->print_cr("eliminating move from interval %d to %d", op1->in_opr()->vreg_number(), op1->result_opr()->vreg_number()));
446           instructions->at_put(j, NULL); // NULL-instructions are deleted by assign_reg_num
447         }
448 
449       } else {
450         // insert move from register to stack just after the beginning of the interval
451         assert(interval == Interval::end() || interval->spill_definition_pos() >= op_id, "invalid order");
452         assert(interval == Interval::end() || (interval->is_split_parent() && interval->spill_state() == storeAtDefinition), "invalid interval");
453 
454         while (interval != Interval::end() && interval->spill_definition_pos() == op_id) {
455           if (!has_new) {
456             // prepare insertion buffer (appended when all instructions of the block are processed)
457             insertion_buffer.init(block->lir());
458             has_new = true;
459           }
460 
461           LIR_Opr from_opr = operand_for_interval(interval);
462           LIR_Opr to_opr = canonical_spill_opr(interval);
463           assert(from_opr->is_fixed_cpu() || from_opr->is_fixed_fpu(), "from operand must be a register");
464           assert(to_opr->is_stack(), "to operand must be a stack slot");
465 
466           insertion_buffer.move(j, from_opr, to_opr);
467           TRACE_LINEAR_SCAN(4, tty->print_cr("inserting move after definition of interval %d to stack slot %d at op_id %d", interval->reg_num(), interval->canonical_spill_slot() - LinearScan::nof_regs, op_id));
468 
469           interval = interval->next();
470         }
471       }
472     } // end of instruction iteration
473 
474     if (has_new) {
475       block->lir()->append(&insertion_buffer);
476     }
477   } // end of block iteration
478 
479   assert(interval == Interval::end(), "missed an interval");
480 }
481 
482 
483 // ********** Phase 1: number all instructions in all blocks
484 // Compute depth-first and linear scan block orders, and number LIR_Op nodes for linear scan.
485 
486 void LinearScan::number_instructions() {
487   {
488     // dummy-timer to measure the cost of the timer itself
489     // (this time is then subtracted from all other timers to get the real value)
490     TIME_LINEAR_SCAN(timer_do_nothing);
491   }
492   TIME_LINEAR_SCAN(timer_number_instructions);
493 
494   // Assign IDs to LIR nodes and build a mapping, lir_ops, from ID to LIR_Op node.
495   int num_blocks = block_count();
496   int num_instructions = 0;
497   int i;
498   for (i = 0; i < num_blocks; i++) {
499     num_instructions += block_at(i)->lir()->instructions_list()->length();
500   }
501 
502   // initialize with correct length
503   _lir_ops = LIR_OpArray(num_instructions, num_instructions, NULL);
504   _block_of_op = BlockBeginArray(num_instructions, num_instructions, NULL);
505 
506   int op_id = 0;
507   int idx = 0;
508 
509   for (i = 0; i < num_blocks; i++) {
510     BlockBegin* block = block_at(i);
511     block->set_first_lir_instruction_id(op_id);
512     LIR_OpList* instructions = block->lir()->instructions_list();
513 
514     int num_inst = instructions->length();
515     for (int j = 0; j < num_inst; j++) {
516       LIR_Op* op = instructions->at(j);
517       op->set_id(op_id);
518 
519       _lir_ops.at_put(idx, op);
520       _block_of_op.at_put(idx, block);
521       assert(lir_op_with_id(op_id) == op, "must match");
522 
523       idx++;
524       op_id += 2; // numbering of lir_ops by two
525     }
526     block->set_last_lir_instruction_id(op_id - 2);
527   }
528   assert(idx == num_instructions, "must match");
529   assert(idx * 2 == op_id, "must match");
530 
531   _has_call.initialize(num_instructions);
532   _has_info.initialize(num_instructions);
533 }
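
// Note on the numbering above: op_ids are assigned in steps of two (0, 2, 4, ...),
// so an op_id maps back to its index in _lir_ops / _block_of_op via op_id >> 1
// (see the op->id() >> 1 uses below). The odd positions in between denote points
// between two instructions, e.g. the loopEndMarker use at block_to + 1 added in
// build_intervals().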
534 
535 
536 // ********** Phase 2: compute local live sets separately for each block
537 // (sets live_gen and live_kill for each block)
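//
// live_gen(block):  operands used in the block before any definition in the same
//                   block, i.e. values that must be live on entry to the block
// live_kill(block): operands defined (as output or temp) somewhere in the block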
538 
539 void LinearScan::set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill) {
540   LIR_Opr opr = value->operand();
541   Constant* con = value->as_Constant();
542 
543   // check some assumptions about debug information
544   assert(!value->type()->is_illegal(), "if this local is used by the interpreter it shouldn't be of indeterminate type");
545   assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands");
546   assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");
547 
548   if ((con == NULL || con->is_pinned()) && opr->is_register()) {
549     assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
550     int reg = opr->vreg_number();
551     if (!live_kill.at(reg)) {
552       live_gen.set_bit(reg);
553       TRACE_LINEAR_SCAN(4, tty->print_cr("  Setting live_gen for value %c%d, LIR op_id %d, register number %d", value->type()->tchar(), value->id(), op->id(), reg));
554     }
555   }
556 }
557 
558 
559 void LinearScan::compute_local_live_sets() {
560   TIME_LINEAR_SCAN(timer_compute_local_live_sets);
561 
562   int  num_blocks = block_count();
563   int  live_size = live_set_size();
564   bool local_has_fpu_registers = false;
565   int  local_num_calls = 0;
566   LIR_OpVisitState visitor;
567 
568   BitMap2D local_interval_in_loop = BitMap2D(_num_virtual_regs, num_loops());
569 
570   // iterate all blocks
571   for (int i = 0; i < num_blocks; i++) {
572     BlockBegin* block = block_at(i);
573 
574     ResourceBitMap live_gen(live_size);
575     ResourceBitMap live_kill(live_size);
576 
577     if (block->is_set(BlockBegin::exception_entry_flag)) {
578       // Phi functions at the beginning of an exception handler are
579       // implicitly defined (= killed) at the beginning of the block.
580       for_each_phi_fun(block, phi,
581         if (!phi->is_illegal()) { live_kill.set_bit(phi->operand()->vreg_number()); }
582       );
583     }
584 
585     LIR_OpList* instructions = block->lir()->instructions_list();
586     int num_inst = instructions->length();
587 
588     // iterate all instructions of the block. skip the first because it is always a label
589     assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
590     for (int j = 1; j < num_inst; j++) {
591       LIR_Op* op = instructions->at(j);
592 
593       // visit operation to collect all operands
594       visitor.visit(op);
595 
596       if (visitor.has_call()) {
597         _has_call.set_bit(op->id() >> 1);
598         local_num_calls++;
599       }
600       if (visitor.info_count() > 0) {
601         _has_info.set_bit(op->id() >> 1);
602       }
603 
604       // iterate input operands of instruction
605       int k, n, reg;
606       n = visitor.opr_count(LIR_OpVisitState::inputMode);
607       for (k = 0; k < n; k++) {
608         LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
609         assert(opr->is_register(), "visitor should only return register operands");
610 
611         if (opr->is_virtual_register()) {
612           assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
613           reg = opr->vreg_number();
614           if (!live_kill.at(reg)) {
615             live_gen.set_bit(reg);
616             TRACE_LINEAR_SCAN(4, tty->print_cr("  Setting live_gen for register %d at instruction %d", reg, op->id()));
617           }
618           if (block->loop_index() >= 0) {
619             local_interval_in_loop.set_bit(reg, block->loop_index());
620           }
621           local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
622         }
623 
624 #ifdef ASSERT
625         // fixed intervals are never live at block boundaries, so
626         // they need not be processed in live sets.
627         // this is checked by these assertions to be sure about it.
628         // the entry block may have incoming values in registers, which is ok.
629         if (!opr->is_virtual_register() && block != ir()->start()) {
630           reg = reg_num(opr);
631           if (is_processed_reg_num(reg)) {
632             assert(live_kill.at(reg), "using fixed register that is not defined in this block");
633           }
634           reg = reg_numHi(opr);
635           if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
636             assert(live_kill.at(reg), "using fixed register that is not defined in this block");
637           }
638         }
639 #endif
640       }
641 
642       // Add uses of live locals from interpreter's point of view for proper debug information generation
643       n = visitor.info_count();
644       for (k = 0; k < n; k++) {
645         CodeEmitInfo* info = visitor.info_at(k);
646         ValueStack* stack = info->stack();
647         for_each_state_value(stack, value,
648           set_live_gen_kill(value, op, live_gen, live_kill);
649           local_has_fpu_registers = local_has_fpu_registers || value->type()->is_float_kind();
650         );
651       }
652 
653       // iterate temp operands of instruction
654       n = visitor.opr_count(LIR_OpVisitState::tempMode);
655       for (k = 0; k < n; k++) {
656         LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
657         assert(opr->is_register(), "visitor should only return register operands");
658 
659         if (opr->is_virtual_register()) {
660           assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
661           reg = opr->vreg_number();
662           live_kill.set_bit(reg);
663           if (block->loop_index() >= 0) {
664             local_interval_in_loop.set_bit(reg, block->loop_index());
665           }
666           local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
667         }
668 
669 #ifdef ASSERT
670         // fixed intervals are never live at block boundaries, so
671         // they need not be processed in live sets
672         // process them only in debug mode so that this can be checked
673         if (!opr->is_virtual_register()) {
674           reg = reg_num(opr);
675           if (is_processed_reg_num(reg)) {
676             live_kill.set_bit(reg_num(opr));
677           }
678           reg = reg_numHi(opr);
679           if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
680             live_kill.set_bit(reg);
681           }
682         }
683 #endif
684       }
685 
686       // iterate output operands of instruction
687       n = visitor.opr_count(LIR_OpVisitState::outputMode);
688       for (k = 0; k < n; k++) {
689         LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
690         assert(opr->is_register(), "visitor should only return register operands");
691 
692         if (opr->is_virtual_register()) {
693           assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
694           reg = opr->vreg_number();
695           live_kill.set_bit(reg);
696           if (block->loop_index() >= 0) {
697             local_interval_in_loop.set_bit(reg, block->loop_index());
698           }
699           local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
700         }
701 
702 #ifdef ASSERT
703         // fixed intervals are never live at block boundaries, so
704         // they need not be processed in live sets
705         // process them only in debug mode so that this can be checked
706         if (!opr->is_virtual_register()) {
707           reg = reg_num(opr);
708           if (is_processed_reg_num(reg)) {
709             live_kill.set_bit(reg_num(opr));
710           }
711           reg = reg_numHi(opr);
712           if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
713             live_kill.set_bit(reg);
714           }
715         }
716 #endif
717       }
718     } // end of instruction iteration
719 
720     block->set_live_gen (live_gen);
721     block->set_live_kill(live_kill);
722     block->set_live_in  (ResourceBitMap(live_size));
723     block->set_live_out (ResourceBitMap(live_size));
724 
725     TRACE_LINEAR_SCAN(4, tty->print("live_gen  B%d ", block->block_id()); print_bitmap(block->live_gen()));
726     TRACE_LINEAR_SCAN(4, tty->print("live_kill B%d ", block->block_id()); print_bitmap(block->live_kill()));
727   } // end of block iteration
728 
729   // propagate local calculated information into LinearScan object
730   _has_fpu_registers = local_has_fpu_registers;
731   compilation()->set_has_fpu_code(local_has_fpu_registers);
732 
733   _num_calls = local_num_calls;
734   _interval_in_loop = local_interval_in_loop;
735 }
736 
737 
738 // ********** Phase 3: perform a backward dataflow analysis to compute global live sets
739 // (sets live_in and live_out for each block)
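//
// The analysis iterates the standard liveness equations until a fixpoint is reached:
//   live_out(B) = union of live_in(S) over all successors and exception handlers S of B
//   live_in(B)  = live_gen(B) | (live_out(B) & !live_kill(B))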
740 
741 void LinearScan::compute_global_live_sets() {
742   TIME_LINEAR_SCAN(timer_compute_global_live_sets);
743 
744   int  num_blocks = block_count();
745   bool change_occurred;
746   bool change_occurred_in_block;
747   int  iteration_count = 0;
748   ResourceBitMap live_out(live_set_size()); // scratch set for calculations
749 
750   // Perform a backward dataflow analysis to compute live_out and live_in for each block.
751   // The loop is executed until a fixpoint is reached (no changes in an iteration)
752   // Exception handlers must be processed because not all live values are
753   // present in the state array, e.g. because of global value numbering
754   do {
755     change_occurred = false;
756 
757     // iterate all blocks in reverse order
758     for (int i = num_blocks - 1; i >= 0; i--) {
759       BlockBegin* block = block_at(i);
760 
761       change_occurred_in_block = false;
762 
763       // live_out(block) is the union of live_in(sux), for successors sux of block
764       int n = block->number_of_sux();
765       int e = block->number_of_exception_handlers();
766       if (n + e > 0) {
767         // block has successors
768         if (n > 0) {
769           live_out.set_from(block->sux_at(0)->live_in());
770           for (int j = 1; j < n; j++) {
771             live_out.set_union(block->sux_at(j)->live_in());
772           }
773         } else {
774           live_out.clear();
775         }
776         for (int j = 0; j < e; j++) {
777           live_out.set_union(block->exception_handler_at(j)->live_in());
778         }
779 
780         if (!block->live_out().is_same(live_out)) {
781           // A change occurred.  Swap the old and new live out sets to avoid copying.
782           ResourceBitMap temp = block->live_out();
783           block->set_live_out(live_out);
784           live_out = temp;
785 
786           change_occurred = true;
787           change_occurred_in_block = true;
788         }
789       }
790 
791       if (iteration_count == 0 || change_occurred_in_block) {
792         // live_in(block) is the union of live_gen(block) with (live_out(block) & !live_kill(block))
793         // note: live_in has to be computed only in first iteration or if live_out has changed!
794         ResourceBitMap live_in = block->live_in();
795         live_in.set_from(block->live_out());
796         live_in.set_difference(block->live_kill());
797         live_in.set_union(block->live_gen());
798       }
799 
800 #ifndef PRODUCT
801       if (TraceLinearScanLevel >= 4) {
802         char c = ' ';
803         if (iteration_count == 0 || change_occurred_in_block) {
804           c = '*';
805         }
806         tty->print("(%d) live_in%c  B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_in());
807         tty->print("(%d) live_out%c B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_out());
808       }
809 #endif
810     }
811     iteration_count++;
812 
813     if (change_occurred && iteration_count > 50) {
814       BAILOUT("too many iterations in compute_global_live_sets");
815     }
816   } while (change_occurred);
817 
818 
819 #ifdef ASSERT
820   // check that fixed intervals are not live at block boundaries
821   // (live set must be empty at fixed intervals)
822   for (int i = 0; i < num_blocks; i++) {
823     BlockBegin* block = block_at(i);
824     for (int j = 0; j < LIR_OprDesc::vreg_base; j++) {
825       assert(block->live_in().at(j)  == false, "live_in  set of fixed register must be empty");
826       assert(block->live_out().at(j) == false, "live_out set of fixed register must be empty");
827       assert(block->live_gen().at(j) == false, "live_gen set of fixed register must be empty");
828     }
829   }
830 #endif
831 
832   // check that the live_in set of the first block is empty
833   ResourceBitMap live_in_args(ir()->start()->live_in().size());
834   if (!ir()->start()->live_in().is_same(live_in_args)) {
835 #ifdef ASSERT
836     tty->print_cr("Error: live_in set of first block must be empty (when this fails, virtual registers are used before they are defined)");
837     tty->print_cr("affected registers:");
838     print_bitmap(ir()->start()->live_in());
839 
840     // print some additional information to simplify debugging
841     for (unsigned int i = 0; i < ir()->start()->live_in().size(); i++) {
842       if (ir()->start()->live_in().at(i)) {
843         Instruction* instr = gen()->instruction_for_vreg(i);
844         tty->print_cr("* vreg %d (HIR instruction %c%d)", i, instr == NULL ? ' ' : instr->type()->tchar(), instr == NULL ? 0 : instr->id());
845 
846         for (int j = 0; j < num_blocks; j++) {
847           BlockBegin* block = block_at(j);
848           if (block->live_gen().at(i)) {
849             tty->print_cr("  used in block B%d", block->block_id());
850           }
851           if (block->live_kill().at(i)) {
852             tty->print_cr("  defined in block B%d", block->block_id());
853           }
854         }
855       }
856     }
857 
858 #endif
859     // when this fails, virtual registers are used before they are defined.
860     assert(false, "live_in set of first block must be empty");
861     // bail out if this occurs in product mode.
862     bailout("live_in set of first block not empty");
863   }
864 }
865 
866 
867 // ********** Phase 4: build intervals
868 // (fills the list _intervals)
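//
// Blocks and instructions are processed in reverse order, so uses are seen before
// the corresponding definitions: an operand that is live at the end of a block first
// gets a range covering the whole block (add_use with block_from .. block_to + 2),
// which add_def later truncates to start at the actual definition position.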
869 
870 void LinearScan::add_use(Value value, int from, int to, IntervalUseKind use_kind) {
871   assert(!value->type()->is_illegal(), "if this value is used by the interpreter it shouldn't be of indeterminate type");
872   LIR_Opr opr = value->operand();
873   Constant* con = value->as_Constant();
874 
875   if ((con == NULL || con->is_pinned()) && opr->is_register()) {
876     assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
877     add_use(opr, from, to, use_kind);
878   }
879 }
880 
881 
882 void LinearScan::add_def(LIR_Opr opr, int def_pos, IntervalUseKind use_kind) {
883   TRACE_LINEAR_SCAN(2, tty->print(" def "); opr->print(tty); tty->print_cr(" def_pos %d (%d)", def_pos, use_kind));
884   assert(opr->is_register(), "should not be called otherwise");
885 
886   if (opr->is_virtual_register()) {
887     assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
888     add_def(opr->vreg_number(), def_pos, use_kind, opr->type_register());
889 
890   } else {
891     int reg = reg_num(opr);
892     if (is_processed_reg_num(reg)) {
893       add_def(reg, def_pos, use_kind, opr->type_register());
894     }
895     reg = reg_numHi(opr);
896     if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
897       add_def(reg, def_pos, use_kind, opr->type_register());
898     }
899   }
900 }
901 
902 void LinearScan::add_use(LIR_Opr opr, int from, int to, IntervalUseKind use_kind) {
903   TRACE_LINEAR_SCAN(2, tty->print(" use "); opr->print(tty); tty->print_cr(" from %d to %d (%d)", from, to, use_kind));
904   assert(opr->is_register(), "should not be called otherwise");
905 
906   if (opr->is_virtual_register()) {
907     assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
908     add_use(opr->vreg_number(), from, to, use_kind, opr->type_register());
909 
910   } else {
911     int reg = reg_num(opr);
912     if (is_processed_reg_num(reg)) {
913       add_use(reg, from, to, use_kind, opr->type_register());
914     }
915     reg = reg_numHi(opr);
916     if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
917       add_use(reg, from, to, use_kind, opr->type_register());
918     }
919   }
920 }
921 
922 void LinearScan::add_temp(LIR_Opr opr, int temp_pos, IntervalUseKind use_kind) {
923   TRACE_LINEAR_SCAN(2, tty->print(" temp "); opr->print(tty); tty->print_cr(" temp_pos %d (%d)", temp_pos, use_kind));
924   assert(opr->is_register(), "should not be called otherwise");
925 
926   if (opr->is_virtual_register()) {
927     assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
928     add_temp(opr->vreg_number(), temp_pos, use_kind, opr->type_register());
929 
930   } else {
931     int reg = reg_num(opr);
932     if (is_processed_reg_num(reg)) {
933       add_temp(reg, temp_pos, use_kind, opr->type_register());
934     }
935     reg = reg_numHi(opr);
936     if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
937       add_temp(reg, temp_pos, use_kind, opr->type_register());
938     }
939   }
940 }
941 
942 
943 void LinearScan::add_def(int reg_num, int def_pos, IntervalUseKind use_kind, BasicType type) {
944   Interval* interval = interval_at(reg_num);
945   if (interval != NULL) {
946     assert(interval->reg_num() == reg_num, "wrong interval");
947 
948     if (type != T_ILLEGAL) {
949       interval->set_type(type);
950     }
951 
952     Range* r = interval->first();
953     if (r->from() <= def_pos) {
954       // Update the starting point (when a range is first created for a use, its
955       // start is the beginning of the current block until a def is encountered.)
956       r->set_from(def_pos);
957       interval->add_use_pos(def_pos, use_kind);
958 
959     } else {
960       // Dead value - make vacuous interval
961       // also add use_kind for dead intervals
962       interval->add_range(def_pos, def_pos + 1);
963       interval->add_use_pos(def_pos, use_kind);
964       TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: def of reg %d at %d occurs without use", reg_num, def_pos));
965     }
966 
967   } else {
968     // Dead value - make vacuous interval
969     // also add use_kind for dead intervals
970     interval = create_interval(reg_num);
971     if (type != T_ILLEGAL) {
972       interval->set_type(type);
973     }
974 
975     interval->add_range(def_pos, def_pos + 1);
976     interval->add_use_pos(def_pos, use_kind);
977     TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: dead value %d at %d in live intervals", reg_num, def_pos));
978   }
979 
980   change_spill_definition_pos(interval, def_pos);
981   if (use_kind == noUse && interval->spill_state() <= startInMemory) {
982         // detection of method-parameters and roundfp-results
983         // TODO: move this directly to position where use-kind is computed
984     interval->set_spill_state(startInMemory);
985   }
986 }
987 
988 void LinearScan::add_use(int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type) {
989   Interval* interval = interval_at(reg_num);
990   if (interval == NULL) {
991     interval = create_interval(reg_num);
992   }
993   assert(interval->reg_num() == reg_num, "wrong interval");
994 
995   if (type != T_ILLEGAL) {
996     interval->set_type(type);
997   }
998 
999   interval->add_range(from, to);
1000   interval->add_use_pos(to, use_kind);
1001 }
1002 
1003 void LinearScan::add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type) {
1004   Interval* interval = interval_at(reg_num);
1005   if (interval == NULL) {
1006     interval = create_interval(reg_num);
1007   }
1008   assert(interval->reg_num() == reg_num, "wrong interval");
1009 
1010   if (type != T_ILLEGAL) {
1011     interval->set_type(type);
1012   }
1013 
1014   interval->add_range(temp_pos, temp_pos + 1);
1015   interval->add_use_pos(temp_pos, use_kind);
1016 }
1017 
1018 
1019 // the results of these functions are used for optimizing spilling and reloading
1020 // if the functions return shouldHaveRegister and the interval is spilled,
1021 // it is not reloaded to a register.
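// Rough meaning of the use kinds returned here:
//   noUse              - the operand may stay in memory (e.g. must_start_in_memory
//                        results and incoming method arguments)
//   shouldHaveRegister - a register is preferred, but a stack slot is acceptable
//   mustHaveRegister   - the operand must be in a register at this position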
1022 IntervalUseKind LinearScan::use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr) {
1023   if (op->code() == lir_move) {
1024     assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
1025     LIR_Op1* move = (LIR_Op1*)op;
1026     LIR_Opr res = move->result_opr();
1027     bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);
1028 
1029     if (result_in_memory) {
1030       // Begin of an interval with must_start_in_memory set.
1031       // This interval will always get a stack slot first, so return noUse.
1032       return noUse;
1033 
1034     } else if (move->in_opr()->is_stack()) {
1035       // method argument (condition must be equal to handle_method_arguments)
1036       return noUse;
1037 
1038     } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
1039       // Move from register to register
1040       if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
1041         // special handling of phi-function moves inside osr-entry blocks
1042         // the input operand, rather than the output operand, must have a register (leads to better register allocation)
1043         return shouldHaveRegister;
1044       }
1045     }
1046   }
1047 
1048   if (opr->is_virtual() &&
1049       gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::must_start_in_memory)) {
1050     // result is a stack-slot, so prevent immediate reloading
1051     return noUse;
1052   }
1053 
1054   // all other operands require a register
1055   return mustHaveRegister;
1056 }
1057 
1058 IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
1059   if (op->code() == lir_move) {
1060     assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
1061     LIR_Op1* move = (LIR_Op1*)op;
1062     LIR_Opr res = move->result_opr();
1063     bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);
1064 
1065     if (result_in_memory) {
1066       // Move to an interval with must_start_in_memory set.
1067       // To avoid moves from stack to stack (not allowed) force the input operand to a register
1068       return mustHaveRegister;
1069 
1070     } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
1071       // Move from register to register
1072       if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
1073         // special handling of phi-function moves inside osr-entry blocks
1074         // the input operand, rather than the output operand, must have a register (leads to better register allocation)
1075         return mustHaveRegister;
1076       }
1077 
1078       // The input operand is not forced to a register (moves from stack to register are allowed),
1079       // but it is faster if the input operand is in a register
1080       return shouldHaveRegister;
1081     }
1082   }
1083 
1084 
1085 #if defined(X86) || defined(S390)
1086   if (op->code() == lir_cmove) {
1087     // conditional moves can handle stack operands
1088     assert(op->result_opr()->is_register(), "result must always be in a register");
1089     return shouldHaveRegister;
1090   }
1091 
1092   // optimizations for the second input operand of arithmetic operations on Intel
1093   // this operand is allowed to be on the stack in some cases
1094   BasicType opr_type = opr->type_register();
1095   if (opr_type == T_FLOAT || opr_type == T_DOUBLE) {
1096     if ((UseSSE == 1 && opr_type == T_FLOAT) || UseSSE >= 2 S390_ONLY(|| true)) {
1097       // SSE float instruction (T_DOUBLE only supported with SSE2)
1098       switch (op->code()) {
1099         case lir_cmp:
1100         case lir_add:
1101         case lir_sub:
1102         case lir_mul:
1103         case lir_div:
1104         {
1105           assert(op->as_Op2() != NULL, "must be LIR_Op2");
1106           LIR_Op2* op2 = (LIR_Op2*)op;
1107           if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
1108             assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
1109             return shouldHaveRegister;
1110           }
1111         }
1112         default:
1113           break;
1114       }
1115     } else {
1116       // FPU stack float instruction
1117       switch (op->code()) {
1118         case lir_add:
1119         case lir_sub:
1120         case lir_mul:
1121         case lir_div:
1122         {
1123           assert(op->as_Op2() != NULL, "must be LIR_Op2");
1124           LIR_Op2* op2 = (LIR_Op2*)op;
1125           if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
1126             assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
1127             return shouldHaveRegister;
1128           }
1129         }
1130         default:
1131           break;
1132       }
1133     }
1134     // We sometimes want to use logical operations on pointers, in particular in GC barriers.
1135     // Since 64bit logical operations do not currently support operands on the stack, we have to make sure
1136     // T_OBJECT doesn't get spilled along with T_LONG.
1137   } else if (opr_type != T_LONG LP64_ONLY(&& opr_type != T_OBJECT)) {
1138     // integer instruction (note: long operands must always be in register)
1139     switch (op->code()) {
1140       case lir_cmp:
1141       case lir_add:
1142       case lir_sub:
1143       case lir_logic_and:
1144       case lir_logic_or:
1145       case lir_logic_xor:
1146       {
1147         assert(op->as_Op2() != NULL, "must be LIR_Op2");
1148         LIR_Op2* op2 = (LIR_Op2*)op;
1149         if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
1150           assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
1151           return shouldHaveRegister;
1152         }
1153       }
1154       default:
1155         break;
1156     }
1157   }
1158 #endif // X86 S390
1159 
1160   // all other operands require a register
1161   return mustHaveRegister;
1162 }
1163 
1164 
1165 void LinearScan::handle_method_arguments(LIR_Op* op) {
1166   // special handling for method arguments (moves from stack to virtual register):
1167   // the interval gets no register assigned, but the stack slot.
1168   // it is split before the first use by the register allocator.
1169 
1170   if (op->code() == lir_move) {
1171     assert(op->as_Op1() != NULL, "must be LIR_Op1");
1172     LIR_Op1* move = (LIR_Op1*)op;
1173 
1174     if (move->in_opr()->is_stack()) {
1175 #ifdef ASSERT
1176       int arg_size = compilation()->method()->arg_size();
1177       LIR_Opr o = move->in_opr();
1178       if (o->is_single_stack()) {
1179         assert(o->single_stack_ix() >= 0 && o->single_stack_ix() < arg_size, "out of range");
1180       } else if (o->is_double_stack()) {
1181         assert(o->double_stack_ix() >= 0 && o->double_stack_ix() < arg_size, "out of range");
1182       } else {
1183         ShouldNotReachHere();
1184       }
1185 
1186       assert(move->id() > 0, "invalid id");
1187       assert(block_of_op_with_id(move->id())->number_of_preds() == 0, "move from stack must be in first block");
1188       assert(move->result_opr()->is_virtual(), "result of move must be a virtual register");
1189 
1190       TRACE_LINEAR_SCAN(4, tty->print_cr("found move from stack slot %d to vreg %d", o->is_single_stack() ? o->single_stack_ix() : o->double_stack_ix(), reg_num(move->result_opr())));
1191 #endif
1192 
1193       Interval* interval = interval_at(reg_num(move->result_opr()));
1194 
1195       int stack_slot = LinearScan::nof_regs + (move->in_opr()->is_single_stack() ? move->in_opr()->single_stack_ix() : move->in_opr()->double_stack_ix());
1196       interval->set_canonical_spill_slot(stack_slot);
1197       interval->assign_reg(stack_slot);
1198     }
1199   }
1200 }
1201 
1202 void LinearScan::handle_doubleword_moves(LIR_Op* op) {
1203   // special handling for doubleword move from memory to register:
1204   // in this case the registers of the input address and the result
1205   // registers must not overlap -> add a temp range for the input registers
1206   if (op->code() == lir_move) {
1207     assert(op->as_Op1() != NULL, "must be LIR_Op1");
1208     LIR_Op1* move = (LIR_Op1*)op;
1209 
1210     if (move->result_opr()->is_double_cpu() && move->in_opr()->is_pointer()) {
1211       LIR_Address* address = move->in_opr()->as_address_ptr();
1212       if (address != NULL) {
1213         if (address->base()->is_valid()) {
1214           add_temp(address->base(), op->id(), noUse);
1215         }
1216         if (address->index()->is_valid()) {
1217           add_temp(address->index(), op->id(), noUse);
1218         }
1219       }
1220     }
1221   }
1222 }
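
// The temp ranges added above keep the base/index registers of the address blocked
// at this op_id, so the allocator cannot assign one of them to the double-word
// result as well (which could clobber the address before the second word is loaded).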
1223 
1224 void LinearScan::add_register_hints(LIR_Op* op) {
1225   switch (op->code()) {
1226     case lir_move:      // fall through
1227     case lir_convert: {
1228       assert(op->as_Op1() != NULL, "lir_move, lir_convert must be LIR_Op1");
1229       LIR_Op1* move = (LIR_Op1*)op;
1230 
1231       LIR_Opr move_from = move->in_opr();
1232       LIR_Opr move_to = move->result_opr();
1233 
1234       if (move_to->is_register() && move_from->is_register()) {
1235         Interval* from = interval_at(reg_num(move_from));
1236         Interval* to = interval_at(reg_num(move_to));
1237         if (from != NULL && to != NULL) {
1238           to->set_register_hint(from);
1239           TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", move->id(), from->reg_num(), to->reg_num()));
1240         }
1241       }
1242       break;
1243     }
1244     case lir_cmove: {
1245       assert(op->as_Op2() != NULL, "lir_cmove must be LIR_Op2");
1246       LIR_Op2* cmove = (LIR_Op2*)op;
1247 
1248       LIR_Opr move_from = cmove->in_opr1();
1249       LIR_Opr move_to = cmove->result_opr();
1250 
1251       if (move_to->is_register() && move_from->is_register()) {
1252         Interval* from = interval_at(reg_num(move_from));
1253         Interval* to = interval_at(reg_num(move_to));
1254         if (from != NULL && to != NULL) {
1255           to->set_register_hint(from);
1256           TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", cmove->id(), from->reg_num(), to->reg_num()));
1257         }
1258       }
1259       break;
1260     }
1261     default:
1262       break;
1263   }
1264 }
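
// A register hint only biases the allocator: where possible, the target interval of a
// move (or cmove/convert) is assigned the same register as its source, so that many
// register-to-register moves end up with identical source and destination and can be
// eliminated.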
1265 
1266 
1267 void LinearScan::build_intervals() {
1268   TIME_LINEAR_SCAN(timer_build_intervals);
1269 
1270   // initialize interval list with expected number of intervals
1271   // (32 is added to have some space for split children without having to resize the list)
1272   _intervals = IntervalList(num_virtual_regs() + 32);
1273   // initialize all slots that are used by build_intervals
1274   _intervals.at_put_grow(num_virtual_regs() - 1, NULL, NULL);
1275 
1276   // create a list with all caller-save registers (cpu, fpu, xmm)
1277   // when an instruction is a call, a temp range is created for all these registers
1278   int num_caller_save_registers = 0;
1279   int caller_save_registers[LinearScan::nof_regs];
1280 
1281   int i;
1282   for (i = 0; i < FrameMap::nof_caller_save_cpu_regs(); i++) {
1283     LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
1284     assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1285     assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1286     caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1287   }
1288 
1289   // temp ranges for fpu registers are only created when the method has
1290   // virtual fpu operands. Otherwise no allocation for fpu registers is
1291   // performed and so the temp ranges would be useless
1292   if (has_fpu_registers()) {
1293 #ifdef X86
1294     if (UseSSE < 2) {
1295 #endif
1296       for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
1297         LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
1298         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1299         assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1300         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1301       }
1302 #ifdef X86
1303     }
1304     if (UseSSE > 0) {
1305       int num_caller_save_xmm_regs = FrameMap::get_num_caller_save_xmms();
1306       for (i = 0; i < num_caller_save_xmm_regs; i ++) {
1307         LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(i);
1308         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1309         assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1310         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1311       }
1312     }
1313 #endif
1314   }
1315   assert(num_caller_save_registers <= LinearScan::nof_regs, "out of bounds");
1316 
1317 
1318   LIR_OpVisitState visitor;
1319 
1320   // iterate all blocks in reverse order
1321   for (i = block_count() - 1; i >= 0; i--) {
1322     BlockBegin* block = block_at(i);
1323     LIR_OpList* instructions = block->lir()->instructions_list();
1324     int         block_from =   block->first_lir_instruction_id();
1325     int         block_to =     block->last_lir_instruction_id();
1326 
1327     assert(block_from == instructions->at(0)->id(), "must be");
1328     assert(block_to   == instructions->at(instructions->length() - 1)->id(), "must be");
1329 
1330     // Update intervals for registers live at the end of this block;
1331     ResourceBitMap live = block->live_out();
1332     int size = (int)live.size();
1333     for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
1334       assert(live.at(number), "should not stop here otherwise");
1335       assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
1336       TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));
1337 
1338       add_use(number, block_from, block_to + 2, noUse, T_ILLEGAL);
1339 
1340       // add special use positions for loop-end blocks when the
1341       // interval is used anywhere inside this loop.  It's possible
1342       // that the block was part of a non-natural loop, so it might
1343       // have an invalid loop index.
1344       if (block->is_set(BlockBegin::linear_scan_loop_end_flag) &&
1345           block->loop_index() != -1 &&
1346           is_interval_in_loop(number, block->loop_index())) {
1347         interval_at(number)->add_use_pos(block_to + 1, loopEndMarker);
1348       }
1349     }
1350 
1351     // iterate all instructions of the block in reverse order.
1352     // skip the first instruction because it is always a label
1353     // definitions of intervals are processed before uses
1354     assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
1355     for (int j = instructions->length() - 1; j >= 1; j--) {
1356       LIR_Op* op = instructions->at(j);
1357       int op_id = op->id();
1358 
1359       // visit operation to collect all operands
1360       visitor.visit(op);
1361 
1362       // add a temp range for each register if operation destroys caller-save registers
1363       if (visitor.has_call()) {
1364         for (int k = 0; k < num_caller_save_registers; k++) {
1365           add_temp(caller_save_registers[k], op_id, noUse, T_ILLEGAL);
1366         }
1367         TRACE_LINEAR_SCAN(4, tty->print_cr("operation destroys all caller-save registers"));
1368       }
1369 
1370       // Add any platform dependent temps
1371       pd_add_temps(op);
1372 
1373       // visit definitions (output and temp operands)
1374       int k, n;
1375       n = visitor.opr_count(LIR_OpVisitState::outputMode);
1376       for (k = 0; k < n; k++) {
1377         LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
1378         assert(opr->is_register(), "visitor should only return register operands");
1379         add_def(opr, op_id, use_kind_of_output_operand(op, opr));
1380       }
1381 
1382       n = visitor.opr_count(LIR_OpVisitState::tempMode);
1383       for (k = 0; k < n; k++) {
1384         LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
1385         assert(opr->is_register(), "visitor should only return register operands");
1386         add_temp(opr, op_id, mustHaveRegister);
1387       }
1388 
1389       // visit uses (input operands)
1390       n = visitor.opr_count(LIR_OpVisitState::inputMode);
1391       for (k = 0; k < n; k++) {
1392         LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
1393         assert(opr->is_register(), "visitor should only return register operands");
1394         add_use(opr, block_from, op_id, use_kind_of_input_operand(op, opr));
1395       }
1396 
1397       // Add uses of live locals from interpreter's point of view for proper
1398       // debug information generation
1399       // Treat these operands as temp values (if the life range is extended
1400       // to a call site, the value would be in a register at the call otherwise)
1401       n = visitor.info_count();
1402       for (k = 0; k < n; k++) {
1403         CodeEmitInfo* info = visitor.info_at(k);
1404         ValueStack* stack = info->stack();
1405         for_each_state_value(stack, value,
1406           add_use(value, block_from, op_id + 1, noUse);
1407         );
1408       }
1409 
1410       // special steps for some instructions (especially moves)
1411       handle_method_arguments(op);
1412       handle_doubleword_moves(op);
1413       add_register_hints(op);
1414 
1415     } // end of instruction iteration
1416   } // end of block iteration
1417 
1418 
1419   // add the range [0, 1[ to all fixed intervals
1420   // -> the register allocator need not handle unhandled fixed intervals
1421   for (int n = 0; n < LinearScan::nof_regs; n++) {
1422     Interval* interval = interval_at(n);
1423     if (interval != NULL) {
1424       interval->add_range(0, 1);
1425     }
1426   }
1427 }
1428 
1429 
1430 // ********** Phase 5: actual register allocation
1431 
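// Comparator for sorting intervals by ascending start position (from()).
// NULL entries are ordered after all non-NULL entries.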
1432 int LinearScan::interval_cmp(Interval** a, Interval** b) {
1433   if (*a != NULL) {
1434     if (*b != NULL) {
1435       return (*a)->from() - (*b)->from();
1436     } else {
1437       return -1;
1438     }
1439   } else {
1440     if (*b != NULL) {
1441       return 1;
1442     } else {
1443       return 0;
1444     }
1445   }
1446 }
1447 
1448 #ifndef PRODUCT
1449 int interval_cmp(Interval* const& l, Interval* const& r) {
1450   return l->from() - r->from();
1451 }
1452 
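// Debug-only check whether the given interval is contained in the sorted
// interval array: a binary search finds some interval with the same start
// position, then the neighborhood of entries with an equal from() is scanned.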
1453 bool find_interval(Interval* interval, IntervalArray* intervals) {
1454   bool found;
1455   int idx = intervals->find_sorted<Interval*, interval_cmp>(interval, found);
1456 
1457   if (!found) {
1458     return false;
1459   }
1460 
1461   int from = interval->from();
1462 
1463   // The index found by the binary search points to an interval that starts at
1464   // the same position as the interval we are looking for. Scan the neighborhood
1465   // of that index (all entries with the same from()) to find the exact interval.
1466   for (int i = idx; i >= 0; i--) {
1467     if (intervals->at(i) == interval) {
1468       return true;
1469     }
1470     if (intervals->at(i)->from() != from) {
1471       break;
1472     }
1473   }
1474 
1475   for (int i = idx + 1; i < intervals->length(); i++) {
1476     if (intervals->at(i) == interval) {
1477       return true;
1478     }
1479     if (intervals->at(i)->from() != from) {
1480       break;
1481     }
1482   }
1483 
1484   return false;
1485 }
1486 
1487 bool LinearScan::is_sorted(IntervalArray* intervals) {
1488   int from = -1;
1489   int null_count = 0;
1490 
1491   for (int i = 0; i < intervals->length(); i++) {
1492     Interval* it = intervals->at(i);
1493     if (it != NULL) {
1494       assert(from <= it->from(), "Intervals are unordered");
1495       from = it->from();
1496     } else {
1497       null_count++;
1498     }
1499   }
1500 
1501   assert(null_count == 0, "Sorted intervals should not contain nulls");
1502 
1503   null_count = 0;
1504 
1505   for (int i = 0; i < interval_count(); i++) {
1506     Interval* interval = interval_at(i);
1507     if (interval != NULL) {
1508       assert(find_interval(interval, intervals), "Lists do not contain same intervals");
1509     } else {
1510       null_count++;
1511     }
1512   }
1513 
1514   assert(interval_count() - null_count == intervals->length(),
1515       "Sorted list should contain the same amount of non-NULL intervals as unsorted list");
1516 
1517   return true;
1518 }
1519 #endif
1520 
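// Appends an interval to the singly linked list described by its head (*first)
// and the previously appended element (*prev).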
1521 void LinearScan::add_to_list(Interval** first, Interval** prev, Interval* interval) {
1522   if (*prev != NULL) {
1523     (*prev)->set_next(interval);
1524   } else {
1525     *first = interval;
1526   }
1527   *prev = interval;
1528 }
1529 
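// Splits the sorted interval array into two linked lists of unhandled intervals:
// intervals accepted by is_list1 go to list1, the remaining intervals go to
// list2 if is_list2 is NULL or accepts them.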
1530 void LinearScan::create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i)) {
1531   assert(is_sorted(_sorted_intervals), "interval list is not sorted");
1532 
1533   *list1 = *list2 = Interval::end();
1534 
1535   Interval* list1_prev = NULL;
1536   Interval* list2_prev = NULL;
1537   Interval* v;
1538 
1539   const int n = _sorted_intervals->length();
1540   for (int i = 0; i < n; i++) {
1541     v = _sorted_intervals->at(i);
1542     if (v == NULL) continue;
1543 
1544     if (is_list1(v)) {
1545       add_to_list(list1, &list1_prev, v);
1546     } else if (is_list2 == NULL || is_list2(v)) {
1547       add_to_list(list2, &list2_prev, v);
1548     }
1549   }
1550 
1551   if (list1_prev != NULL) list1_prev->set_next(Interval::end());
1552   if (list2_prev != NULL) list2_prev->set_next(Interval::end());
1553 
1554   assert(list1_prev == NULL || list1_prev->next() == Interval::end(), "linear list ends not with sentinel");
1555   assert(list2_prev == NULL || list2_prev->next() == Interval::end(), "linear list ends not with sentinel");
1556 }
1557 
1558 
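// Builds _sorted_intervals from the _intervals list (which may contain NULLs).
// The intervals are created roughly in order of their start positions, so an
// insertion sort over the almost-sorted input is cheaper than a full sort.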
1559 void LinearScan::sort_intervals_before_allocation() {
1560   TIME_LINEAR_SCAN(timer_sort_intervals_before);
1561 
1562   if (_needs_full_resort) {
1563     // There is no known reason why this should occur but just in case...
1564     assert(false, "should never occur");
1565     // Re-sort existing interval list because an Interval::from() has changed
1566     _sorted_intervals->sort(interval_cmp);
1567     _needs_full_resort = false;
1568   }
1569 
1570   IntervalList* unsorted_list = &_intervals;
1571   int unsorted_len = unsorted_list->length();
1572   int sorted_len = 0;
1573   int unsorted_idx;
1574   int sorted_idx = 0;
1575   int sorted_from_max = -1;
1576 
1577   // calc number of items for sorted list (sorted list must not contain NULL values)
1578   for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
1579     if (unsorted_list->at(unsorted_idx) != NULL) {
1580       sorted_len++;
1581     }
1582   }
1583   IntervalArray* sorted_list = new IntervalArray(sorted_len, sorted_len, NULL);
1584 
1585   // special sorting algorithm: the original interval-list is almost sorted,
1586   // only some intervals are swapped. So this is much faster than a complete QuickSort
1587   for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
1588     Interval* cur_interval = unsorted_list->at(unsorted_idx);
1589 
1590     if (cur_interval != NULL) {
1591       int cur_from = cur_interval->from();
1592 
1593       if (sorted_from_max <= cur_from) {
1594         sorted_list->at_put(sorted_idx++, cur_interval);
1595         sorted_from_max = cur_interval->from();
1596       } else {
1597       // the assumption that the intervals are already sorted failed,
1598       // so this interval must be inserted manually at the correct position
1599         int j;
1600         for (j = sorted_idx - 1; j >= 0 && cur_from < sorted_list->at(j)->from(); j--) {
1601           sorted_list->at_put(j + 1, sorted_list->at(j));
1602         }
1603         sorted_list->at_put(j + 1, cur_interval);
1604         sorted_idx++;
1605       }
1606     }
1607   }
1608   _sorted_intervals = sorted_list;
1609   assert(is_sorted(_sorted_intervals), "intervals unsorted");
1610 }
1611 
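// Merges the intervals created by splitting during allocation into the already
// sorted interval array, keeping the result sorted by start position.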
1612 void LinearScan::sort_intervals_after_allocation() {
1613   TIME_LINEAR_SCAN(timer_sort_intervals_after);
1614 
1615   if (_needs_full_resort) {
1616     // Re-sort existing interval list because an Interval::from() has changed
1617     _sorted_intervals->sort(interval_cmp);
1618     _needs_full_resort = false;
1619   }
1620 
1621   IntervalArray* old_list = _sorted_intervals;
1622   IntervalList* new_list = _new_intervals_from_allocation;
1623   int old_len = old_list->length();
1624   int new_len = new_list == NULL ? 0 : new_list->length();
1625 
1626   if (new_len == 0) {
1627     // no intervals have been added during allocation, so sorted list is already up to date
1628     assert(is_sorted(_sorted_intervals), "intervals unsorted");
1629     return;
1630   }
1631 
1632   // conventional sort-algorithm for new intervals
1633   new_list->sort(interval_cmp);
1634 
1635   // merge old and new list (both already sorted) into one combined list
1636   int combined_list_len = old_len + new_len;
1637   IntervalArray* combined_list = new IntervalArray(combined_list_len, combined_list_len, NULL);
1638   int old_idx = 0;
1639   int new_idx = 0;
1640 
1641   while (old_idx + new_idx < old_len + new_len) {
1642     if (new_idx >= new_len || (old_idx < old_len && old_list->at(old_idx)->from() <= new_list->at(new_idx)->from())) {
1643       combined_list->at_put(old_idx + new_idx, old_list->at(old_idx));
1644       old_idx++;
1645     } else {
1646       combined_list->at_put(old_idx + new_idx, new_list->at(new_idx));
1647       new_idx++;
1648     }
1649   }
1650 
1651   _sorted_intervals = combined_list;
1652   assert(is_sorted(_sorted_intervals), "intervals unsorted");
1653 }
1654 
1655 
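// Runs the linear scan walker, first over the cpu intervals and then (only if
// the method uses fpu registers) over the fpu intervals. Precolored (fixed)
// and virtual intervals are passed to the walker as separate unhandled lists.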
1656 void LinearScan::allocate_registers() {
1657   TIME_LINEAR_SCAN(timer_allocate_registers);
1658 
1659   Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
1660   Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;
1661 
1662   // collect cpu intervals
1663   create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals,
1664                          is_precolored_cpu_interval, is_virtual_cpu_interval);
1665 
1666   // collect fpu intervals
1667   create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals,
1668                          is_precolored_fpu_interval, is_virtual_fpu_interval);
1669   // the fpu intervals must be collected before the cpu allocation below, because
1670   // cpu_lsw.walk() changes interval positions.
1671 
1672   if (!has_fpu_registers()) {
1673 #ifdef ASSERT
1674     assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval");
1675 #else
1676     if (not_precolored_fpu_intervals != Interval::end()) {
1677       BAILOUT("missed an uncolored fpu interval");
1678     }
1679 #endif
1680   }
1681 
1682   // allocate cpu registers
1683   LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
1684   cpu_lsw.walk();
1685   cpu_lsw.finish_allocation();
1686 
1687   if (has_fpu_registers()) {
1688     // allocate fpu registers
1689     LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
1690     fpu_lsw.walk();
1691     fpu_lsw.finish_allocation();
1692   }
1693 }
1694 
1695 
1696 // ********** Phase 6: resolve data flow
1697 // (insert moves at edges between blocks if intervals have been split)
1698 
1699 // wrapper for Interval::split_child_at_op_id that performs a bailout in product mode
1700 // instead of returning NULL
1701 Interval* LinearScan::split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode) {
1702   Interval* result = interval->split_child_at_op_id(op_id, mode);
1703   if (result != NULL) {
1704     return result;
1705   }
1706 
1707   assert(false, "must find an interval, but do a clean bailout in product mode");
1708   result = new Interval(LIR_OprDesc::vreg_base);
1709   result->assign_reg(0);
1710   result->set_type(T_INT);
1711   BAILOUT_("LinearScan: interval is NULL", result);
1712 }
1713 
1714 
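// The following helpers return the split child of an interval that covers the
// first instruction of a block, the position just after the last instruction
// of a block, or an arbitrary operation id, respectively.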
1715 Interval* LinearScan::interval_at_block_begin(BlockBegin* block, int reg_num) {
1716   assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
1717   assert(interval_at(reg_num) != NULL, "no interval found");
1718 
1719   return split_child_at_op_id(interval_at(reg_num), block->first_lir_instruction_id(), LIR_OpVisitState::outputMode);
1720 }
1721 
1722 Interval* LinearScan::interval_at_block_end(BlockBegin* block, int reg_num) {
1723   assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
1724   assert(interval_at(reg_num) != NULL, "no interval found");
1725 
1726   return split_child_at_op_id(interval_at(reg_num), block->last_lir_instruction_id() + 1, LIR_OpVisitState::outputMode);
1727 }
1728 
1729 Interval* LinearScan::interval_at_op_id(int reg_num, int op_id) {
1730   assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
1731   assert(interval_at(reg_num) != NULL, "no interval found");
1732 
1733   return split_child_at_op_id(interval_at(reg_num), op_id, LIR_OpVisitState::inputMode);
1734 }
1735 
1736 
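// Collects the moves required on the edge from from_block to to_block: for each
// register live across the edge, a mapping is added when the interval was split
// such that the locations at the end of from_block and at the beginning of
// to_block differ.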
1737 void LinearScan::resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
1738   DEBUG_ONLY(move_resolver.check_empty());
1739 
1740   const int size = live_set_size();
1741   const ResourceBitMap live_at_edge = to_block->live_in();
1742 
1743   // visit all registers where the live_at_edge bit is set
1744   for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
1745     assert(r < num_virtual_regs(), "live information set for non-existing interval");
1746     assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");
1747 
1748     Interval* from_interval = interval_at_block_end(from_block, r);
1749     Interval* to_interval = interval_at_block_begin(to_block, r);
1750 
1751     if (from_interval != to_interval && (from_interval->assigned_reg() != to_interval->assigned_reg() || from_interval->assigned_regHi() != to_interval->assigned_regHi())) {
1752       // need to insert move instruction
1753       move_resolver.add_mapping(from_interval, to_interval);
1754     }
1755   }
1756 }
1757 
1758 
1759 void LinearScan::resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
1760   if (from_block->number_of_sux() <= 1) {
1761     TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at end of from_block B%d", from_block->block_id()));
1762 
1763     LIR_OpList* instructions = from_block->lir()->instructions_list();
1764     LIR_OpBranch* branch = instructions->last()->as_OpBranch();
1765     if (branch != NULL) {
1766       // insert moves before branch
1767       assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
1768       move_resolver.set_insert_position(from_block->lir(), instructions->length() - 2);
1769     } else {
1770       move_resolver.set_insert_position(from_block->lir(), instructions->length() - 1);
1771     }
1772 
1773   } else {
1774     TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at beginning of to_block B%d", to_block->block_id()));
1775 #ifdef ASSERT
1776     assert(from_block->lir()->instructions_list()->at(0)->as_OpLabel() != NULL, "block does not start with a label");
1777 
1778     // because the number of predecessor edges must match the number of
1779     // successor edges, blocks that are reached by switch statements may
1780     // have more than one predecessor, but it is guaranteed that all of
1781     // these predecessors are the same block.
1782     for (int i = 0; i < to_block->number_of_preds(); i++) {
1783       assert(from_block == to_block->pred_at(i), "all critical edges must be broken");
1784     }
1785 #endif
1786 
1787     move_resolver.set_insert_position(to_block->lir(), 0);
1788   }
1789 }
1790 
1791 
1792 // insert necessary moves (spilling or reloading) at edges between blocks if interval has been split
1793 void LinearScan::resolve_data_flow() {
1794   TIME_LINEAR_SCAN(timer_resolve_data_flow);
1795 
1796   int num_blocks = block_count();
1797   MoveResolver move_resolver(this);
1798   ResourceBitMap block_completed(num_blocks);
1799   ResourceBitMap already_resolved(num_blocks);
1800 
1801   int i;
1802   for (i = 0; i < num_blocks; i++) {
1803     BlockBegin* block = block_at(i);
1804 
1805     // check if block has only one predecessor and only one successor
1806     if (block->number_of_preds() == 1 && block->number_of_sux() == 1 && block->number_of_exception_handlers() == 0) {
1807       LIR_OpList* instructions = block->lir()->instructions_list();
1808       assert(instructions->at(0)->code() == lir_label, "block must start with label");
1809       assert(instructions->last()->code() == lir_branch, "block with successors must end with branch");
1810       assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block with successor must end with unconditional branch");
1811 
1812       // check if block is empty (only label and branch)
1813       if (instructions->length() == 2) {
1814         BlockBegin* pred = block->pred_at(0);
1815         BlockBegin* sux = block->sux_at(0);
1816 
1817         // prevent optimization of two consecutive blocks
1818         if (!block_completed.at(pred->linear_scan_number()) && !block_completed.at(sux->linear_scan_number())) {
1819           TRACE_LINEAR_SCAN(3, tty->print_cr("**** optimizing empty block B%d (pred: B%d, sux: B%d)", block->block_id(), pred->block_id(), sux->block_id()));
1820           block_completed.set_bit(block->linear_scan_number());
1821 
1822           // directly resolve between pred and sux (without looking at the empty block between)
1823           resolve_collect_mappings(pred, sux, move_resolver);
1824           if (move_resolver.has_mappings()) {
1825             move_resolver.set_insert_position(block->lir(), 0);
1826             move_resolver.resolve_and_append_moves();
1827           }
1828         }
1829       }
1830     }
1831   }
1832 
1833 
1834   for (i = 0; i < num_blocks; i++) {
1835     if (!block_completed.at(i)) {
1836       BlockBegin* from_block = block_at(i);
1837       already_resolved.set_from(block_completed);
1838 
1839       int num_sux = from_block->number_of_sux();
1840       for (int s = 0; s < num_sux; s++) {
1841         BlockBegin* to_block = from_block->sux_at(s);
1842 
1843         // check for duplicate edges between the same blocks (can happen with switch blocks)
1844         if (!already_resolved.at(to_block->linear_scan_number())) {
1845           TRACE_LINEAR_SCAN(3, tty->print_cr("**** processing edge between B%d and B%d", from_block->block_id(), to_block->block_id()));
1846           already_resolved.set_bit(to_block->linear_scan_number());
1847 
1848           // collect all intervals that have been split between from_block and to_block
1849           resolve_collect_mappings(from_block, to_block, move_resolver);
1850           if (move_resolver.has_mappings()) {
1851             resolve_find_insert_pos(from_block, to_block, move_resolver);
1852             move_resolver.resolve_and_append_moves();
1853           }
1854         }
1855       }
1856     }
1857   }
1858 }
1859 
1860 
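// Spills the interval for reg_num at the entry of an exception handler block
// when required (the two cases are described inside): a range of length 1 is
// split off at the block begin and assigned a spill slot.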
1861 void LinearScan::resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver) {
1862   if (interval_at(reg_num) == NULL) {
1863     // if a phi function is never used, no interval is created -> ignore this
1864     return;
1865   }
1866 
1867   Interval* interval = interval_at_block_begin(block, reg_num);
1868   int reg = interval->assigned_reg();
1869   int regHi = interval->assigned_regHi();
1870 
1871   if ((reg < nof_regs && interval->always_in_memory()) ||
1872       (use_fpu_stack_allocation() && reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg)) {
1873     // the interval is split to get a short range that is located on the stack
1874     // in the following two cases:
1875     // * the interval started in memory (e.g. method parameter), but is currently in a register
1876     //   this is an optimization for exception handling that reduces the number of moves that
1877     //   are necessary for resolving the states when an exception uses this exception handler
1878     // * the interval would be on the fpu stack at the beginning of the exception handler
1879     //   this is not allowed because of the complicated fpu stack handling on Intel
1880 
1881     // range that will be spilled to memory
1882     int from_op_id = block->first_lir_instruction_id();
1883     int to_op_id = from_op_id + 1;  // short live range of length 1
1884     assert(interval->from() <= from_op_id && interval->to() >= to_op_id,
1885            "no split allowed between exception entry and first instruction");
1886 
1887     if (interval->from() != from_op_id) {
1888       // the part before from_op_id is unchanged
1889       interval = interval->split(from_op_id);
1890       interval->assign_reg(reg, regHi);
1891       append_interval(interval);
1892     } else {
1893       _needs_full_resort = true;
1894     }
1895     assert(interval->from() == from_op_id, "must be true now");
1896 
1897     Interval* spilled_part = interval;
1898     if (interval->to() != to_op_id) {
1899       // the part after to_op_id is unchanged
1900       spilled_part = interval->split_from_start(to_op_id);
1901       append_interval(spilled_part);
1902       move_resolver.add_mapping(spilled_part, interval);
1903     }
1904     assign_spill_slot(spilled_part);
1905 
1906     assert(spilled_part->from() == from_op_id && spilled_part->to() == to_op_id, "just checking");
1907   }
1908 }
1909 
1910 void LinearScan::resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver) {
1911   assert(block->is_set(BlockBegin::exception_entry_flag), "should not call otherwise");
1912   DEBUG_ONLY(move_resolver.check_empty());
1913 
1914   // visit all registers where the live_in bit is set
1915   int size = live_set_size();
1916   for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
1917     resolve_exception_entry(block, r, move_resolver);
1918   }
1919 
1920   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
1921   for_each_phi_fun(block, phi,
1922     if (!phi->is_illegal()) { resolve_exception_entry(block, phi->operand()->vreg_number(), move_resolver); }
1923   );
1924 
1925   if (move_resolver.has_mappings()) {
1926     // insert moves after first instruction
1927     move_resolver.set_insert_position(block->lir(), 0);
1928     move_resolver.resolve_and_append_moves();
1929   }
1930 }
1931 
1932 
1933 void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver) {
1934   if (interval_at(reg_num) == NULL) {
1935     // if a phi function is never used, no interval is created -> ignore this
1936     return;
1937   }
1938 
1939   // the computation of to_interval is equal to resolve_collect_mappings,
1940   // but from_interval is more complicated because of phi functions
1941   BlockBegin* to_block = handler->entry_block();
1942   Interval* to_interval = interval_at_block_begin(to_block, reg_num);
1943 
1944   if (phi != NULL) {
1945     // phi function of the exception entry block
1946     // no moves are created for this phi function in the LIR_Generator, so the
1947     // interval at the throwing instruction must be searched using the operands
1948     // of the phi function
1949     Value from_value = phi->operand_at(handler->phi_operand());
1950 
1951     // with phi functions it can happen that the same from_value is used in
1952     // multiple mappings, so notify move-resolver that this is allowed
1953     move_resolver.set_multiple_reads_allowed();
1954 
1955     Constant* con = from_value->as_Constant();
1956     if (con != NULL && (!con->is_pinned() || con->operand()->is_constant())) {
1957       // Need a mapping from constant to interval if unpinned (may have no register) or if the operand is a constant (no register).
1958       move_resolver.add_mapping(LIR_OprFact::value_type(con->type()), to_interval);
1959     } else {
1960       // search split child at the throwing op_id
1961       Interval* from_interval = interval_at_op_id(from_value->operand()->vreg_number(), throwing_op_id);
1962       move_resolver.add_mapping(from_interval, to_interval);
1963     }
1964   } else {
1965     // no phi function, so use reg_num also for from_interval
1966     // search split child at the throwing op_id
1967     Interval* from_interval = interval_at_op_id(reg_num, throwing_op_id);
1968     if (from_interval != to_interval) {
1969       // optimization to reduce number of moves: when to_interval is on stack and
1970       // the stack slot is known to be always correct, then no move is necessary
1971       if (!from_interval->always_in_memory() || from_interval->canonical_spill_slot() != to_interval->assigned_reg()) {
1972         move_resolver.add_mapping(from_interval, to_interval);
1973       }
1974     }
1975   }
1976 }
1977 
1978 void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver) {
1979   TRACE_LINEAR_SCAN(4, tty->print_cr("resolving exception handler B%d: throwing_op_id=%d", handler->entry_block()->block_id(), throwing_op_id));
1980 
1981   DEBUG_ONLY(move_resolver.check_empty());
1982   assert(handler->lir_op_id() == -1, "already processed this xhandler");
1983   DEBUG_ONLY(handler->set_lir_op_id(throwing_op_id));
1984   assert(handler->entry_code() == NULL, "code already present");
1985 
1986   // visit all registers where the live_in bit is set
1987   BlockBegin* block = handler->entry_block();
1988   int size = live_set_size();
1989   for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
1990     resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
1991   }
1992 
1993   // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
1994   for_each_phi_fun(block, phi,
1995     if (!phi->is_illegal()) { resolve_exception_edge(handler, throwing_op_id, phi->operand()->vreg_number(), phi, move_resolver); }
1996   );
1997 
1998   if (move_resolver.has_mappings()) {
1999     LIR_List* entry_code = new LIR_List(compilation());
2000     move_resolver.set_insert_position(entry_code, 0);
2001     move_resolver.resolve_and_append_moves();
2002 
2003     entry_code->jump(handler->entry_block());
2004     handler->set_entry_code(entry_code);
2005   }
2006 }
2007 
2008 
2009 void LinearScan::resolve_exception_handlers() {
2010   MoveResolver move_resolver(this);
2011   LIR_OpVisitState visitor;
2012   int num_blocks = block_count();
2013 
2014   int i;
2015   for (i = 0; i < num_blocks; i++) {
2016     BlockBegin* block = block_at(i);
2017     if (block->is_set(BlockBegin::exception_entry_flag)) {
2018       resolve_exception_entry(block, move_resolver);
2019     }
2020   }
2021 
2022   for (i = 0; i < num_blocks; i++) {
2023     BlockBegin* block = block_at(i);
2024     LIR_List* ops = block->lir();
2025     int num_ops = ops->length();
2026 
2027     // iterate all instructions of the block. skip the first because it is always a label
2028     assert(visitor.no_operands(ops->at(0)), "first operation must always be a label");
2029     for (int j = 1; j < num_ops; j++) {
2030       LIR_Op* op = ops->at(j);
2031       int op_id = op->id();
2032 
2033       if (op_id != -1 && has_info(op_id)) {
2034         // visit operation to collect all operands
2035         visitor.visit(op);
2036         assert(visitor.info_count() > 0, "should not visit otherwise");
2037 
2038         XHandlers* xhandlers = visitor.all_xhandler();
2039         int n = xhandlers->length();
2040         for (int k = 0; k < n; k++) {
2041           resolve_exception_edge(xhandlers->handler_at(k), op_id, move_resolver);
2042         }
2043 
2044 #ifdef ASSERT
2045       } else {
2046         visitor.visit(op);
2047         assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
2048 #endif
2049       }
2050     }
2051   }
2052 }
2053 
2054 
2055 // ********** Phase 7: assign register numbers back to LIR
2056 // (includes computation of debug information and oop maps)
2057 
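// The VMReg and the LIR_Opr for an interval are computed lazily and cached in
// the interval, since the same interval is typically queried many times (the
// asserts below re-validate the cached values in debug builds).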
2058 VMReg LinearScan::vm_reg_for_interval(Interval* interval) {
2059   VMReg reg = interval->cached_vm_reg();
2060   if (!reg->is_valid() ) {
2061     reg = vm_reg_for_operand(operand_for_interval(interval));
2062     interval->set_cached_vm_reg(reg);
2063   }
2064   assert(reg == vm_reg_for_operand(operand_for_interval(interval)), "wrong cached value");
2065   return reg;
2066 }
2067 
2068 VMReg LinearScan::vm_reg_for_operand(LIR_Opr opr) {
2069   assert(opr->is_oop(), "currently only implemented for oop operands");
2070   return frame_map()->regname(opr);
2071 }
2072 
2073 
2074 LIR_Opr LinearScan::operand_for_interval(Interval* interval) {
2075   LIR_Opr opr = interval->cached_opr();
2076   if (opr->is_illegal()) {
2077     opr = calc_operand_for_interval(interval);
2078     interval->set_cached_opr(opr);
2079   }
2080 
2081   assert(opr == calc_operand_for_interval(interval), "wrong cached value");
2082   return opr;
2083 }
2084 
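// Translates an interval's assigned location into a LIR_Opr of the interval's
// type: assigned register numbers >= nof_regs denote stack slots, smaller
// numbers denote cpu, fpu or xmm registers depending on type and platform.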
2085 LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
2086   int assigned_reg = interval->assigned_reg();
2087   BasicType type = interval->type();
2088 
2089   if (assigned_reg >= nof_regs) {
2090     // stack slot
2091     assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2092     return LIR_OprFact::stack(assigned_reg - nof_regs, type);
2093 
2094   } else {
2095     // register
2096     switch (type) {
2097       case T_OBJECT: {
2098         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2099         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2100         return LIR_OprFact::single_cpu_oop(assigned_reg);
2101       }
2102 
2103       case T_ADDRESS: {
2104         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2105         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2106         return LIR_OprFact::single_cpu_address(assigned_reg);
2107       }
2108 
2109       case T_METADATA: {
2110         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2111         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2112         return LIR_OprFact::single_cpu_metadata(assigned_reg);
2113       }
2114 
2115 #ifdef __SOFTFP__
2116       case T_FLOAT:  // fall through
2117 #endif // __SOFTFP__
2118       case T_INT: {
2119         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2120         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2121         return LIR_OprFact::single_cpu(assigned_reg);
2122       }
2123 
2124 #ifdef __SOFTFP__
2125       case T_DOUBLE:  // fall through
2126 #endif // __SOFTFP__
2127       case T_LONG: {
2128         int assigned_regHi = interval->assigned_regHi();
2129         assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
2130         assert(num_physical_regs(T_LONG) == 1 ||
2131                (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
2132 
2133         assert(assigned_reg != assigned_regHi, "invalid allocation");
2134         assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
2135                "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
2136         assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must match");
2137         if (requires_adjacent_regs(T_LONG)) {
2138           assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
2139         }
2140 
2141 #ifdef _LP64
2142         return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
2143 #else
2144 #if defined(SPARC) || defined(PPC32)
2145         return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
2146 #else
2147         return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
2148 #endif // SPARC
2149 #endif // LP64
2150       }
2151 
2152 #ifndef __SOFTFP__
2153       case T_FLOAT: {
2154 #ifdef X86
2155         if (UseSSE >= 1) {
2156           int last_xmm_reg = pd_last_xmm_reg;
2157 #ifdef _LP64
2158           if (UseAVX < 3) {
2159             last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2160           }
2161 #endif
2162           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2163           assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2164           return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
2165         }
2166 #endif
2167 
2168         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2169         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
2170         return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
2171       }
2172 
2173       case T_DOUBLE: {
2174 #ifdef X86
2175         if (UseSSE >= 2) {
2176           int last_xmm_reg = pd_last_xmm_reg;
2177 #ifdef _LP64
2178           if (UseAVX < 3) {
2179             last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
2180           }
2181 #endif
2182           assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= last_xmm_reg, "no xmm register");
2183           assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
2184           return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
2185         }
2186 #endif
2187 
2188 #ifdef SPARC
2189         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2190         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2191         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2192         LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
2193 #elif defined(ARM32)
2194         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2195         assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
2196         assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
2197         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
2198 #else
2199         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
2200         assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
2201         LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
2202 #endif
2203         return result;
2204       }
2205 #endif // __SOFTFP__
2206 
2207       default: {
2208         ShouldNotReachHere();
2209         return LIR_OprFact::illegalOpr;
2210       }
2211     }
2212   }
2213 }
2214 
2215 LIR_Opr LinearScan::canonical_spill_opr(Interval* interval) {
2216   assert(interval->canonical_spill_slot() >= nof_regs, "canonical spill slot not set");
2217   return LIR_OprFact::stack(interval->canonical_spill_slot() - nof_regs, interval->type());
2218 }
2219 
2220 LIR_Opr LinearScan::color_lir_opr(LIR_Opr opr, int op_id, LIR_OpVisitState::OprMode mode) {
2221   assert(opr->is_virtual(), "should not call this otherwise");
2222 
2223   Interval* interval = interval_at(opr->vreg_number());
2224   assert(interval != NULL, "interval must exist");
2225 
2226   if (op_id != -1) {
2227 #ifdef ASSERT
2228     BlockBegin* block = block_of_op_with_id(op_id);
2229     if (block->number_of_sux() <= 1 && op_id == block->last_lir_instruction_id()) {
2230       // spill moves may have been appended at the end of this block, but before
2231       // the branch instruction, so the split child information for this branch
2232       // would be incorrect.
2233       LIR_OpBranch* branch = block->lir()->instructions_list()->last()->as_OpBranch();
2234       if (branch != NULL) {
2235         if (block->live_out().at(opr->vreg_number())) {
2236           assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
2237           assert(false, "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolve_data_flow)");
2238         }
2239       }
2240     }
2241 #endif
2242 
2243     // operands are not changed when an interval is split during allocation,
2244     // so search the right interval here
2245     interval = split_child_at_op_id(interval, op_id, mode);
2246   }
2247 
2248   LIR_Opr res = operand_for_interval(interval);
2249 
2250 #ifdef X86
2251   // new semantics for is_last_use: set not only at the definite end of an interval,
2252   // but also before a hole.
2253   // This may still miss some cases (e.g. for dead values), but the last-use
2254   // information does not have to be completely correct: it is only needed for
2255   // fpu stack allocation.
2256   if (res->is_fpu_register()) {
2257     if (opr->is_last_use() || op_id == interval->to() || (op_id != -1 && interval->has_hole_between(op_id, op_id + 1))) {
2258       assert(op_id == -1 || !is_block_begin(op_id), "holes at begin of block may also result from control flow");
2259       res = res->make_last_use();
2260     }
2261   }
2262 #endif
2263 
2264   assert(!gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::callee_saved) || !FrameMap::is_caller_save_register(res), "bad allocation");
2265 
2266   return res;
2267 }
2268 
2269 
2270 #ifdef ASSERT
2271 // some methods used to check correctness of debug information
2272 
2273 void assert_no_register_values(GrowableArray<ScopeValue*>* values) {
2274   if (values == NULL) {
2275     return;
2276   }
2277 
2278   for (int i = 0; i < values->length(); i++) {
2279     ScopeValue* value = values->at(i);
2280 
2281     if (value->is_location()) {
2282       Location location = ((LocationValue*)value)->location();
2283       assert(location.where() == Location::on_stack, "value is in register");
2284     }
2285   }
2286 }
2287 
2288 void assert_no_register_values(GrowableArray<MonitorValue*>* values) {
2289   if (values == NULL) {
2290     return;
2291   }
2292 
2293   for (int i = 0; i < values->length(); i++) {
2294     MonitorValue* value = values->at(i);
2295 
2296     if (value->owner()->is_location()) {
2297       Location location = ((LocationValue*)value->owner())->location();
2298       assert(location.where() == Location::on_stack, "owner is in register");
2299     }
2300     assert(value->basic_lock().where() == Location::on_stack, "basic_lock is in register");
2301   }
2302 }
2303 
2304 void assert_equal(Location l1, Location l2) {
2305   assert(l1.where() == l2.where() && l1.type() == l2.type() && l1.offset() == l2.offset(), "");
2306 }
2307 
2308 void assert_equal(ScopeValue* v1, ScopeValue* v2) {
2309   if (v1->is_location()) {
2310     assert(v2->is_location(), "");
2311     assert_equal(((LocationValue*)v1)->location(), ((LocationValue*)v2)->location());
2312   } else if (v1->is_constant_int()) {
2313     assert(v2->is_constant_int(), "");
2314     assert(((ConstantIntValue*)v1)->value() == ((ConstantIntValue*)v2)->value(), "");
2315   } else if (v1->is_constant_double()) {
2316     assert(v2->is_constant_double(), "");
2317     assert(((ConstantDoubleValue*)v1)->value() == ((ConstantDoubleValue*)v2)->value(), "");
2318   } else if (v1->is_constant_long()) {
2319     assert(v2->is_constant_long(), "");
2320     assert(((ConstantLongValue*)v1)->value() == ((ConstantLongValue*)v2)->value(), "");
2321   } else if (v1->is_constant_oop()) {
2322     assert(v2->is_constant_oop(), "");
2323     assert(((ConstantOopWriteValue*)v1)->value() == ((ConstantOopWriteValue*)v2)->value(), "");
2324   } else {
2325     ShouldNotReachHere();
2326   }
2327 }
2328 
2329 void assert_equal(MonitorValue* m1, MonitorValue* m2) {
2330   assert_equal(m1->owner(), m2->owner());
2331   assert_equal(m1->basic_lock(), m2->basic_lock());
2332 }
2333 
2334 void assert_equal(IRScopeDebugInfo* d1, IRScopeDebugInfo* d2) {
2335   assert(d1->scope() == d2->scope(), "not equal");
2336   assert(d1->bci() == d2->bci(), "not equal");
2337 
2338   if (d1->locals() != NULL) {
2339     assert(d1->locals() != NULL && d2->locals() != NULL, "not equal");
2340     assert(d1->locals()->length() == d2->locals()->length(), "not equal");
2341     for (int i = 0; i < d1->locals()->length(); i++) {
2342       assert_equal(d1->locals()->at(i), d2->locals()->at(i));
2343     }
2344   } else {
2345     assert(d1->locals() == NULL && d2->locals() == NULL, "not equal");
2346   }
2347 
2348   if (d1->expressions() != NULL) {
2349     assert(d1->expressions() != NULL && d2->expressions() != NULL, "not equal");
2350     assert(d1->expressions()->length() == d2->expressions()->length(), "not equal");
2351     for (int i = 0; i < d1->expressions()->length(); i++) {
2352       assert_equal(d1->expressions()->at(i), d2->expressions()->at(i));
2353     }
2354   } else {
2355     assert(d1->expressions() == NULL && d2->expressions() == NULL, "not equal");
2356   }
2357 
2358   if (d1->monitors() != NULL) {
2359     assert(d1->monitors() != NULL && d2->monitors() != NULL, "not equal");
2360     assert(d1->monitors()->length() == d2->monitors()->length(), "not equal");
2361     for (int i = 0; i < d1->monitors()->length(); i++) {
2362       assert_equal(d1->monitors()->at(i), d2->monitors()->at(i));
2363     }
2364   } else {
2365     assert(d1->monitors() == NULL && d2->monitors() == NULL, "not equal");
2366   }
2367 
2368   if (d1->caller() != NULL) {
2369     assert(d1->caller() != NULL && d2->caller() != NULL, "not equal");
2370     assert_equal(d1->caller(), d2->caller());
2371   } else {
2372     assert(d1->caller() == NULL && d2->caller() == NULL, "not equal");
2373   }
2374 }
2375 
2376 void check_stack_depth(CodeEmitInfo* info, int stack_end) {
2377   if (info->stack()->bci() != SynchronizationEntryBCI && !info->scope()->method()->is_native()) {
2378     Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
2379     switch (code) {
2380       case Bytecodes::_ifnull    : // fall through
2381       case Bytecodes::_ifnonnull : // fall through
2382       case Bytecodes::_ifeq      : // fall through
2383       case Bytecodes::_ifne      : // fall through
2384       case Bytecodes::_iflt      : // fall through
2385       case Bytecodes::_ifge      : // fall through
2386       case Bytecodes::_ifgt      : // fall through
2387       case Bytecodes::_ifle      : // fall through
2388       case Bytecodes::_if_icmpeq : // fall through
2389       case Bytecodes::_if_icmpne : // fall through
2390       case Bytecodes::_if_icmplt : // fall through
2391       case Bytecodes::_if_icmpge : // fall through
2392       case Bytecodes::_if_icmpgt : // fall through
2393       case Bytecodes::_if_icmple : // fall through
2394       case Bytecodes::_if_acmpeq : // fall through
2395       case Bytecodes::_if_acmpne :
2396         assert(stack_end >= -Bytecodes::depth(code), "must have non-empty expression stack at if bytecode");
2397         break;
2398       default:
2399         break;
2400     }
2401   }
2402 }
2403 
2404 #endif // ASSERT
2405 
2406 
2407 IntervalWalker* LinearScan::init_compute_oop_maps() {
2408   // setup lists of potential oops for walking
2409   Interval* oop_intervals;
2410   Interval* non_oop_intervals;
2411 
2412   create_unhandled_lists(&oop_intervals, &non_oop_intervals, is_oop_interval, NULL);
2413 
2414   // intervals that have no oops inside need not be processed.
2415   // to ensure that the walker runs until the last instruction id, add a dummy
2416   // interval with a high operation id
2417   non_oop_intervals = new Interval(any_reg);
2418   non_oop_intervals->add_range(max_jint - 2, max_jint - 1);
2419 
2420   return new IntervalWalker(this, oop_intervals, non_oop_intervals);
2421 }
2422 
2423 
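// Computes the oop map for one operation: the interval walker is advanced to
// just before the operation, and every oop interval live across it contributes
// its current location; intervals that are always in memory additionally
// contribute their canonical spill slot, and the objects of all held monitors
// are added at the end.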
2424 OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site) {
2425   TRACE_LINEAR_SCAN(3, tty->print_cr("creating oop map at op_id %d", op->id()));
2426 
2427   // walk before the current operation -> intervals that start at
2428   // the operation (= output operands of the operation) are not
2429   // included in the oop map
2430   iw->walk_before(op->id());
2431 
2432   int frame_size = frame_map()->framesize();
2433   int arg_count = frame_map()->oop_map_arg_count();
2434   OopMap* map = new OopMap(frame_size, arg_count);
2435 
2436   // Iterate through active intervals
2437   for (Interval* interval = iw->active_first(fixedKind); interval != Interval::end(); interval = interval->next()) {
2438     int assigned_reg = interval->assigned_reg();
2439 
2440     assert(interval->current_from() <= op->id() && op->id() <= interval->current_to(), "interval should not be active otherwise");
2441     assert(interval->assigned_regHi() == any_reg, "oop must be single word");
2442     assert(interval->reg_num() >= LIR_OprDesc::vreg_base, "fixed interval found");
2443 
2444     // Check if this range covers the instruction. Intervals that
2445     // start or end at the current operation are not included in the
2446     // oop map, except in the case of patching moves.  For patching
2447     // moves, any intervals which end at this instruction are included
2448     // in the oop map since we may safepoint while doing the patch
2449     // before we've consumed the inputs.
2450     if (op->is_patching() || op->id() < interval->current_to()) {
2451 
2452       // caller-save registers must not be included into oop-maps at calls
2453       assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
2454 
2455       VMReg name = vm_reg_for_interval(interval);
2456       set_oop(map, name);
2457 
2458       // Spill optimization: when the stack value is guaranteed to be always correct,
2459       // then it must be added to the oop map even if the interval is currently in a register
2460       if (interval->always_in_memory() &&
2461           op->id() > interval->spill_definition_pos() &&
2462           interval->assigned_reg() != interval->canonical_spill_slot()) {
2463         assert(interval->spill_definition_pos() > 0, "position not set correctly");
2464         assert(interval->canonical_spill_slot() >= LinearScan::nof_regs, "no spill slot assigned");
2465         assert(interval->assigned_reg() < LinearScan::nof_regs, "interval is on stack, so stack slot is registered twice");
2466 
2467         set_oop(map, frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
2468       }
2469     }
2470   }
2471 
2472   // add oops from lock stack
2473   assert(info->stack() != NULL, "CodeEmitInfo must always have a stack");
2474   int locks_count = info->stack()->total_locks_size();
2475   for (int i = 0; i < locks_count; i++) {
2476     set_oop(map, frame_map()->monitor_object_regname(i));
2477   }
2478 
2479   return map;
2480 }
2481 
2482 
2483 void LinearScan::compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op) {
2484   assert(visitor.info_count() > 0, "no oop map needed");
2485 
2486   // compute oop_map only for first CodeEmitInfo
2487   // because it is (in most cases) equal for all other infos of the same operation
2488   CodeEmitInfo* first_info = visitor.info_at(0);
2489   OopMap* first_oop_map = compute_oop_map(iw, op, first_info, visitor.has_call());
2490 
2491   for (int i = 0; i < visitor.info_count(); i++) {
2492     CodeEmitInfo* info = visitor.info_at(i);
2493     OopMap* oop_map = first_oop_map;
2494 
2495     // compute worst case interpreter size in case of a deoptimization
2496     _compilation->update_interpreter_frame_size(info->interpreter_frame_size());
2497 
2498     if (info->stack()->locks_size() != first_info->stack()->locks_size()) {
2499       // this info has a different number of locks than the precomputed oop map
2500       // (possible for lock and unlock instructions) -> compute oop map with
2501       // correct lock information
2502       oop_map = compute_oop_map(iw, op, info, visitor.has_call());
2503     }
2504 
2505     if (info->_oop_map == NULL) {
2506       info->_oop_map = oop_map;
2507     } else {
2508       // a CodeEmitInfo can not be shared between different LIR-instructions
2509       // because interval splitting can occur anywhere between two instructions
2510       // and so the oop maps must be different
2511       // -> check if the already set oop_map is exactly the one calculated for this operation
2512       assert(info->_oop_map == oop_map, "same CodeEmitInfo used for multiple LIR instructions");
2513     }
2514   }
2515 }
2516 
2517 
2518 // frequently used constants
2519 // Allocate them with new so they are never destroyed (otherwise, a
2520 // forced exit could destroy these objects while they are still in
2521 // use).
2522 ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantOopWriteValue(NULL);
2523 ConstantIntValue*      LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(-1);
2524 ConstantIntValue*      LinearScan::_int_0_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue((jint)0);
2525 ConstantIntValue*      LinearScan::_int_1_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(1);
2526 ConstantIntValue*      LinearScan::_int_2_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(2);
2527 LocationValue*         _illegal_value = new (ResourceObj::C_HEAP, mtCompiler) LocationValue(Location());
2528 
2529 void LinearScan::init_compute_debug_info() {
2530   // cache for frequently used scope values
2531   // (cpu registers and stack slots)
2532   int cache_size = (LinearScan::nof_cpu_regs + frame_map()->argcount() + max_spills()) * 2;
2533   _scope_value_cache = ScopeValueArray(cache_size, cache_size, NULL);
2534 }
2535 
2536 MonitorValue* LinearScan::location_for_monitor_index(int monitor_index) {
2537   Location loc;
2538   if (!frame_map()->location_for_monitor_object(monitor_index, &loc)) {
2539     bailout("too large frame");
2540   }
2541   ScopeValue* object_scope_value = new LocationValue(loc);
2542 
2543   if (!frame_map()->location_for_monitor_lock(monitor_index, &loc)) {
2544     bailout("too large frame");
2545   }
2546   return new MonitorValue(object_scope_value, loc);
2547 }
2548 
2549 LocationValue* LinearScan::location_for_name(int name, Location::Type loc_type) {
2550   Location loc;
2551   if (!frame_map()->locations_for_slot(name, loc_type, &loc)) {
2552     bailout("too large frame");
2553   }
2554   return new LocationValue(loc);
2555 }
2556 
2557 
2558 int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
2559   assert(opr->is_constant(), "should not be called otherwise");
2560 
2561   LIR_Const* c = opr->as_constant_ptr();
2562   BasicType t = c->type();
2563   switch (t) {
2564     case T_OBJECT: {
2565       jobject value = c->as_jobject();
2566       if (value == NULL) {
2567         scope_values->append(_oop_null_scope_value);
2568       } else {
2569         scope_values->append(new ConstantOopWriteValue(c->as_jobject()));
2570       }
2571       return 1;
2572     }
2573 
2574     case T_INT: // fall through
2575     case T_FLOAT: {
2576       int value = c->as_jint_bits();
2577       switch (value) {
2578         case -1: scope_values->append(_int_m1_scope_value); break;
2579         case 0:  scope_values->append(_int_0_scope_value); break;
2580         case 1:  scope_values->append(_int_1_scope_value); break;
2581         case 2:  scope_values->append(_int_2_scope_value); break;
2582         default: scope_values->append(new ConstantIntValue(c->as_jint_bits())); break;
2583       }
2584       return 1;
2585     }
2586 
2587     case T_LONG: // fall through
2588     case T_DOUBLE: {
2589 #ifdef _LP64
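      // on 64-bit the whole long/double constant is described by one ConstantLongValue;
      // a dummy int fills the other slot of the two-word value (same convention as for
      // double-word operands in append_scope_value_for_operand)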
2590       scope_values->append(_int_0_scope_value);
2591       scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
2592 #else
2593       if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
2594         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
2595         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
2596       } else {
2597         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
2598         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
2599       }
2600 #endif
2601       return 2;
2602     }
2603 
2604     case T_ADDRESS: {
2605 #ifdef _LP64
2606       scope_values->append(new ConstantLongValue(c->as_jint()));
2607 #else
2608       scope_values->append(new ConstantIntValue(c->as_jint()));
2609 #endif
2610       return 1;
2611     }
2612 
2613     default:
2614       ShouldNotReachHere();
2615       return -1;
2616   }
2617 }
2618 
2619 int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
2620   if (opr->is_single_stack()) {
2621     int stack_idx = opr->single_stack_ix();
2622     bool is_oop = opr->is_oop_register();
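    // stack slots are cached after the cpu registers; each location has two cache
    // entries, an even index for a normal value and an odd index for an oop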
2623     int cache_idx = (stack_idx + LinearScan::nof_cpu_regs) * 2 + (is_oop ? 1 : 0);
2624 
2625     ScopeValue* sv = _scope_value_cache.at(cache_idx);
2626     if (sv == NULL) {
2627       Location::Type loc_type = is_oop ? Location::oop : Location::normal;
2628       sv = location_for_name(stack_idx, loc_type);
2629       _scope_value_cache.at_put(cache_idx, sv);
2630     }
2631 
2632     // check if cached value is correct
2633     DEBUG_ONLY(assert_equal(sv, location_for_name(stack_idx, is_oop ? Location::oop : Location::normal)));
2634 
2635     scope_values->append(sv);
2636     return 1;
2637 
2638   } else if (opr->is_single_cpu()) {
2639     bool is_oop = opr->is_oop_register();
2640     int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);
2641     Location::Type int_loc_type = NOT_LP64(Location::normal) LP64_ONLY(Location::int_in_long);
2642 
2643     ScopeValue* sv = _scope_value_cache.at(cache_idx);
2644     if (sv == NULL) {
2645       Location::Type loc_type = is_oop ? Location::oop : int_loc_type;
2646       VMReg rname = frame_map()->regname(opr);
2647       sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
2648       _scope_value_cache.at_put(cache_idx, sv);
2649     }
2650 
2651     // check if cached value is correct
2652     DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : int_loc_type, frame_map()->regname(opr)))));
2653 
2654     scope_values->append(sv);
2655     return 1;
2656 
2657 #ifdef X86
2658   } else if (opr->is_single_xmm()) {
2659     VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
2660     LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
2661 
2662     scope_values->append(sv);
2663     return 1;
2664 #endif
2665 
2666   } else if (opr->is_single_fpu()) {
2667 #ifdef X86
2668     // the exact location of fpu stack values is only known
2669     // during fpu stack allocation, so the stack allocator object
2670     // must be present
2671     assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2672     assert(_fpu_stack_allocator != NULL, "must be present");
2673     opr = _fpu_stack_allocator->to_fpu_stack(opr);
2674 #endif
2675 
2676     Location::Type loc_type = float_saved_as_double ? Location::float_in_dbl : Location::normal;
2677     VMReg rname = frame_map()->fpu_regname(opr->fpu_regnr());
2678 #ifndef __SOFTFP__
2679 #ifndef VM_LITTLE_ENDIAN
2680     // On S390 a (single precision) float value occupies only the high
2681     // word of the full double register. So when the double register is
2682     // stored to memory (e.g. by the RegisterSaver), then the float value
2683     // is found at offset 0. I.e. the code below is not needed on S390.
2684 #ifndef S390
2685     if (! float_saved_as_double) {
2686       // On big endian systems, we may have an issue if float registers use only
2687       // the low half of the (same) double registers.
2688       // Both the float and the double could have the same regnr but would correspond
2689       // to two different addresses once saved.
2690 
2691       // get next safely (no assertion checks)
2692       VMReg next = VMRegImpl::as_VMReg(1+rname->value());
2693       if (next->is_reg() &&
2694           (next->as_FloatRegister() == rname->as_FloatRegister())) {
2695         // the back-end does use the same numbering for the double and the float
2696         rname = next; // VMReg for the low bits, e.g. the real VMReg for the float
2697       }
2698     }
2699 #endif // !S390
2700 #endif
2701 #endif
2702     LocationValue* sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
2703 
2704     scope_values->append(sv);
2705     return 1;
2706 
2707   } else {
2708     // double-size operands
2709 
2710     ScopeValue* first;
2711     ScopeValue* second;
2712 
2713     if (opr->is_double_stack()) {
2714 #ifdef _LP64
2715       Location loc1;
2716       Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl;
2717       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) {
2718         bailout("too large frame");
2719       }
2720       // Does this reverse on x86 vs. sparc?
2721       first =  new LocationValue(loc1);
2722       second = _int_0_scope_value;
2723 #else
2724       Location loc1, loc2;
2725       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
2726         bailout("too large frame");
2727       }
2728       first =  new LocationValue(loc1);
2729       second = new LocationValue(loc2);
2730 #endif // _LP64
2731 
2732     } else if (opr->is_double_cpu()) {
2733 #ifdef _LP64
2734       VMReg rname_first = opr->as_register_lo()->as_VMReg();
2735       first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
2736       second = _int_0_scope_value;
2737 #else
2738       VMReg rname_first = opr->as_register_lo()->as_VMReg();
2739       VMReg rname_second = opr->as_register_hi()->as_VMReg();
2740 
2741       if (hi_word_offset_in_bytes < lo_word_offset_in_bytes) {
2742         // lo/hi are swapped relative to first and second, so swap them
2743         VMReg tmp = rname_first;
2744         rname_first = rname_second;
2745         rname_second = tmp;
2746       }
2747 
2748       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2749       second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2750 #endif //_LP64
2751 
2752 
2753 #ifdef X86
2754     } else if (opr->is_double_xmm()) {
2755       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
2756       VMReg rname_first  = opr->as_xmm_double_reg()->as_VMReg();
2757 #  ifdef _LP64
2758       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2759       second = _int_0_scope_value;
2760 #  else
2761       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2762       // %%% This is probably a waste but we'll keep things as they were for now
2763       if (true) {
2764         VMReg rname_second = rname_first->next();
2765         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2766       }
2767 #  endif
2768 #endif
2769 
2770     } else if (opr->is_double_fpu()) {
2771       // On SPARC, fpu_regnrLo/fpu_regnrHi represent the two halves of
2772       // the double as float registers in the native ordering. On X86,
2773       // fpu_regnrLo is a FPU stack slot whose VMReg represents
2774       // the low-order word of the double and fpu_regnrLo + 1 is the
2775       // name for the other half.  *first and *second must represent the
2776       // least and most significant words, respectively.
2777 
2778 #ifdef X86
2779       // the exact location of fpu stack values is only known
2780       // during fpu stack allocation, so the stack allocator object
2781       // must be present
2782       assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
2783       assert(_fpu_stack_allocator != NULL, "must be present");
2784       opr = _fpu_stack_allocator->to_fpu_stack(opr);
2785 
2786       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
2787 #endif
2788 #ifdef SPARC
2789       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
2790 #endif
2791 #ifdef ARM32
2792       assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
2793 #endif
2794 #ifdef PPC32
2795       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
2796 #endif
2797 
2798 #ifdef VM_LITTLE_ENDIAN
2799       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
2800 #else
2801       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
2802 #endif
2803 
2804 #ifdef _LP64
2805       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
2806       second = _int_0_scope_value;
2807 #else
2808       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
2809       // %%% This is probably a waste but we'll keep things as they were for now
2810       if (true) {
2811         VMReg rname_second = rname_first->next();
2812         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
2813       }
2814 #endif
2815 
2816     } else {
2817       ShouldNotReachHere();
2818       first = NULL;
2819       second = NULL;
2820     }
2821 
2822     assert(first != NULL && second != NULL, "must be set");
2823     // The convention the interpreter uses is that the second local
2824     // holds the first raw word of the native double representation.
2825     // This is actually reasonable, since locals and stack arrays
2826     // grow downwards in all implementations.
2827     // (If, on some machine, the interpreter's Java locals or stack
2828     // were to grow upwards, the embedded doubles would be word-swapped.)
2829     scope_values->append(second);
2830     scope_values->append(first);
2831     return 2;
2832   }
2833 }
2834 
2835 
2836 int LinearScan::append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values) {
2837   if (value != NULL) {
2838     LIR_Opr opr = value->operand();
2839     Constant* con = value->as_Constant();
2840 
2841     assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands (or illegal if constant is optimized away)");
2842     assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");
2843 
2844     if (con != NULL && !con->is_pinned() && !opr->is_constant()) {
2845       // Unpinned constants may have a virtual operand for part of their lifetime
2846       // or an illegal operand if the constant was optimized away,
2847       // so always use a constant operand
2848       opr = LIR_OprFact::value_type(con->type());
2849     }
2850     assert(opr->is_virtual() || opr->is_constant(), "other cases not allowed here");
2851 
2852     if (opr->is_virtual()) {
2853       LIR_OpVisitState::OprMode mode = LIR_OpVisitState::inputMode;
2854 
2855       BlockBegin* block = block_of_op_with_id(op_id);
2856       if (block->number_of_sux() == 1 && op_id == block->last_lir_instruction_id()) {
2857         // generating debug information for the last instruction of a block.
2858         // if this instruction is a branch, spill moves are inserted before this branch
2859         // and so the wrong operand would be returned (spill moves at block boundaries are not
2860         // considered in the live ranges of intervals)
2861         // Solution: use the first op_id of the branch target block instead.
2862         if (block->lir()->instructions_list()->last()->as_OpBranch() != NULL) {
2863           if (block->live_out().at(opr->vreg_number())) {
2864             op_id = block->sux_at(0)->first_lir_instruction_id();
2865             mode = LIR_OpVisitState::outputMode;
2866           }
2867         }
2868       }
2869 
2870       // Get current location of operand
2871       // The operand must be live because debug information is considered when building the intervals
2872       // if the interval is not live, color_lir_opr will cause an assertion failure
2873       opr = color_lir_opr(opr, op_id, mode);
2874       assert(!has_call(op_id) || opr->is_stack() || !is_caller_save(reg_num(opr)), "can not have caller-save register operands at calls");
2875 
2876       // Append to ScopeValue array
2877       return append_scope_value_for_operand(opr, scope_values);
2878 
2879     } else {
2880       assert(value->as_Constant() != NULL, "all other instructions have only virtual operands");
2881       assert(opr->is_constant(), "operand must be constant");
2882 
2883       return append_scope_value_for_constant(opr, scope_values);
2884     }
2885   } else {
2886     // append a dummy value because the real value is not needed
2887     scope_values->append(_illegal_value);
2888     return 1;
2889   }
2890 }
2891 
2892 
2893 IRScopeDebugInfo* LinearScan::compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state) {
2894   IRScopeDebugInfo* caller_debug_info = NULL;
2895 
2896   ValueStack* caller_state = cur_state->caller_state();
2897   if (caller_state != NULL) {
2898     // process recursively to compute outermost scope first
2899     caller_debug_info = compute_debug_info_for_scope(op_id, cur_scope->caller(), caller_state, innermost_state);
2900   }
2901 
2902   // initialize these to null.
2903   // If we don't need deopt info or there are no locals, expressions or monitors,
2904   // then they are recorded as having no information, which avoids allocating zero-length arrays.
2905   GrowableArray<ScopeValue*>*   locals      = NULL;
2906   GrowableArray<ScopeValue*>*   expressions = NULL;
2907   GrowableArray<MonitorValue*>* monitors    = NULL;
2908 
2909   // describe local variable values
2910   int nof_locals = cur_state->locals_size();
2911   if (nof_locals > 0) {
2912     locals = new GrowableArray<ScopeValue*>(nof_locals);
2913 
2914     int pos = 0;
2915     while (pos < nof_locals) {
2916       assert(pos < cur_state->locals_size(), "why not?");
2917 
2918       Value local = cur_state->local_at(pos);
2919       pos += append_scope_value(op_id, local, locals);
2920 
2921       assert(locals->length() == pos, "must match");
2922     }
2923     assert(locals->length() == cur_scope->method()->max_locals(), "wrong number of locals");
2924     assert(locals->length() == cur_state->locals_size(), "wrong number of locals");
2925   } else if (cur_scope->method()->max_locals() > 0) {
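    // an empty exception state carries no local values, but the locals array must
    // still have one entry per local, so describe every local as illegal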
2926     assert(cur_state->kind() == ValueStack::EmptyExceptionState, "should be");
2927     nof_locals = cur_scope->method()->max_locals();
2928     locals = new GrowableArray<ScopeValue*>(nof_locals);
2929     for(int i = 0; i < nof_locals; i++) {
2930       locals->append(_illegal_value);
2931     }
2932   }
2933 
2934   // describe expression stack
2935   int nof_stack = cur_state->stack_size();
2936   if (nof_stack > 0) {
2937     expressions = new GrowableArray<ScopeValue*>(nof_stack);
2938 
2939     int pos = 0;
2940     while (pos < nof_stack) {
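      // stack_at_inc returns the value at pos and advances pos by the size of that
      // value, which matches the number of scope values appended for it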
2941       Value expression = cur_state->stack_at_inc(pos);
2942       append_scope_value(op_id, expression, expressions);
2943 
2944       assert(expressions->length() == pos, "must match");
2945     }
2946     assert(expressions->length() == cur_state->stack_size(), "wrong number of stack entries");
2947   }
2948 
2949   // describe monitors
2950   int nof_locks = cur_state->locks_size();
2951   if (nof_locks > 0) {
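    // monitors of caller scopes come first, so this scope's monitor indices start
    // after all locks held by the caller chain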
2952     int lock_offset = cur_state->caller_state() != NULL ? cur_state->caller_state()->total_locks_size() : 0;
2953     monitors = new GrowableArray<MonitorValue*>(nof_locks);
2954     for (int i = 0; i < nof_locks; i++) {
2955       monitors->append(location_for_monitor_index(lock_offset + i));
2956     }
2957   }
2958 
2959   return new IRScopeDebugInfo(cur_scope, cur_state->bci(), locals, expressions, monitors, caller_debug_info);
2960 }
2961 
2962 
2963 void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) {
2964   TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));
2965 
2966   IRScope* innermost_scope = info->scope();
2967   ValueStack* innermost_state = info->stack();
2968 
2969   assert(innermost_scope != NULL && innermost_state != NULL, "why is it missing?");
2970 
2971   DEBUG_ONLY(check_stack_depth(info, innermost_state->stack_size()));
2972 
2973   if (info->_scope_debug_info == NULL) {
2974     // compute debug information
2975     info->_scope_debug_info = compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state);
2976   } else {
2977     // debug information already set. Check that it is correct from the current point of view
2978     DEBUG_ONLY(assert_equal(info->_scope_debug_info, compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state)));
2979   }
2980 }
2981 
2982 
2983 void LinearScan::assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw) {
2984   LIR_OpVisitState visitor;
2985   int num_inst = instructions->length();
2986   bool has_dead = false;
2987 
2988   for (int j = 0; j < num_inst; j++) {
2989     LIR_Op* op = instructions->at(j);
2990     if (op == NULL) {  // this can happen when spill-moves are removed in eliminate_spill_moves
2991       has_dead = true;
2992       continue;
2993     }
2994     int op_id = op->id();
2995 
2996     // visit instruction to get list of operands
2997     visitor.visit(op);
2998 
2999     // iterate all modes of the visitor and process all virtual operands
3000     for_each_visitor_mode(mode) {
3001       int n = visitor.opr_count(mode);
3002       for (int k = 0; k < n; k++) {
3003         LIR_Opr opr = visitor.opr_at(mode, k);
3004         if (opr->is_virtual_register()) {
3005           visitor.set_opr_at(mode, k, color_lir_opr(opr, op_id, mode));
3006         }
3007       }
3008     }
3009 
3010     if (visitor.info_count() > 0) {
3011       // exception handling
3012       if (compilation()->has_exception_handlers()) {
3013         XHandlers* xhandlers = visitor.all_xhandler();
3014         int n = xhandlers->length();
3015         for (int k = 0; k < n; k++) {
3016           XHandler* handler = xhandlers->handler_at(k);
3017           if (handler->entry_code() != NULL) {
3018             assign_reg_num(handler->entry_code()->instructions_list(), NULL);
3019           }
3020         }
3021       } else {
3022         assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
3023       }
3024 
3025       // compute oop map
3026       assert(iw != NULL, "needed for compute_oop_map");
3027       compute_oop_map(iw, visitor, op);
3028 
3029       // compute debug information
3030       if (!use_fpu_stack_allocation()) {
3031         // compute debug information if fpu stack allocation is not needed.
3032         // when fpu stack allocation is needed, the debug information cannot
3033         // be computed here because the exact location of fpu operands is not known
3034         // -> debug information is created inside the fpu stack allocator
3035         int n = visitor.info_count();
3036         for (int k = 0; k < n; k++) {
3037           compute_debug_info(visitor.info_at(k), op_id);
3038         }
3039       }
3040     }
3041 
3042 #ifdef ASSERT
3043     // make sure we haven't made the op invalid.
3044     op->verify();
3045 #endif
3046 
3047     // remove useless moves
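    // after register assignment a move whose source and destination refer to the
    // same physical location is a no-op and can be dropped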
3048     if (op->code() == lir_move) {
3049       assert(op->as_Op1() != NULL, "move must be LIR_Op1");
3050       LIR_Op1* move = (LIR_Op1*)op;
3051       LIR_Opr src = move->in_opr();
3052       LIR_Opr dst = move->result_opr();
3053       if (dst == src ||
3054           (!dst->is_pointer() && !src->is_pointer() &&
3055            src->is_same_register(dst))) {
3056         instructions->at_put(j, NULL);
3057         has_dead = true;
3058       }
3059     }
3060   }
3061 
3062   if (has_dead) {
3063     // iterate all instructions of the block and remove all null-values.
3064     int insert_point = 0;
3065     for (int j = 0; j < num_inst; j++) {
3066       LIR_Op* op = instructions->at(j);
3067       if (op != NULL) {
3068         if (insert_point != j) {
3069           instructions->at_put(insert_point, op);
3070         }
3071         insert_point++;
3072       }
3073     }
3074     instructions->trunc_to(insert_point);
3075   }
3076 }
3077 
3078 void LinearScan::assign_reg_num() {
3079   TIME_LINEAR_SCAN(timer_assign_reg_num);
3080 
3081   init_compute_debug_info();
3082   IntervalWalker* iw = init_compute_oop_maps();
3083 
3084   int num_blocks = block_count();
3085   for (int i = 0; i < num_blocks; i++) {
3086     BlockBegin* block = block_at(i);
3087     assign_reg_num(block->lir()->instructions_list(), iw);
3088   }
3089 }
3090 
3091 
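// Driver for all phases of the linear scan allocator: live set computation,
// interval construction and sorting, register allocation, data flow resolution,
// spill slot propagation, debug info / oop map creation, optional FPU stack
// allocation and final LIR optimizations.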
3092 void LinearScan::do_linear_scan() {
3093   NOT_PRODUCT(_total_timer.begin_method());
3094 
3095   number_instructions();
3096 
3097   NOT_PRODUCT(print_lir(1, "Before Register Allocation"));
3098 
3099   compute_local_live_sets();
3100   compute_global_live_sets();
3101   CHECK_BAILOUT();
3102 
3103   build_intervals();
3104   CHECK_BAILOUT();
3105   sort_intervals_before_allocation();
3106 
3107   NOT_PRODUCT(print_intervals("Before Register Allocation"));
3108   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_before_alloc));
3109 
3110   allocate_registers();
3111   CHECK_BAILOUT();
3112 
3113   resolve_data_flow();
3114   if (compilation()->has_exception_handlers()) {
3115     resolve_exception_handlers();
3116   }
3117   // fill in number of spill slots into frame_map
3118   propagate_spill_slots();
3119   CHECK_BAILOUT();
3120 
3121   NOT_PRODUCT(print_intervals("After Register Allocation"));
3122   NOT_PRODUCT(print_lir(2, "LIR after register allocation:"));
3123 
3124   sort_intervals_after_allocation();
3125 
3126   DEBUG_ONLY(verify());
3127 
3128   eliminate_spill_moves();
3129   assign_reg_num();
3130   CHECK_BAILOUT();
3131 
3132   NOT_PRODUCT(print_lir(2, "LIR after assignment of register numbers:"));
3133   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_after_asign));
3134 
3135   { TIME_LINEAR_SCAN(timer_allocate_fpu_stack);
3136 
3137     if (use_fpu_stack_allocation()) {
3138       allocate_fpu_stack(); // Only has effect on Intel
3139       NOT_PRODUCT(print_lir(2, "LIR after FPU stack allocation:"));
3140     }
3141   }
3142 
3143   { TIME_LINEAR_SCAN(timer_optimize_lir);
3144 
3145     EdgeMoveOptimizer::optimize(ir()->code());
3146     ControlFlowOptimizer::optimize(ir()->code());
3147     // check that cfg is still correct after optimizations
3148     ir()->verify();
3149   }
3150 
3151   NOT_PRODUCT(print_lir(1, "Before Code Generation", false));
3152   NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_final));
3153   NOT_PRODUCT(_total_timer.end_method(this));
3154 }
3155 
3156 
3157 // ********** Printing functions
3158 
3159 #ifndef PRODUCT
3160 
3161 void LinearScan::print_timers(double total) {
3162   _total_timer.print(total);
3163 }
3164 
3165 void LinearScan::print_statistics() {
3166   _stat_before_alloc.print("before allocation");
3167   _stat_after_asign.print("after assignment of register");
3168   _stat_final.print("after optimization");
3169 }
3170 
3171 void LinearScan::print_bitmap(BitMap& b) {
3172   for (unsigned int i = 0; i < b.size(); i++) {
3173     if (b.at(i)) tty->print("%d ", i);
3174   }
3175   tty->cr();
3176 }
3177 
3178 void LinearScan::print_intervals(const char* label) {
3179   if (TraceLinearScanLevel >= 1) {
3180     int i;
3181     tty->cr();
3182     tty->print_cr("%s", label);
3183 
3184     for (i = 0; i < interval_count(); i++) {
3185       Interval* interval = interval_at(i);
3186       if (interval != NULL) {
3187         interval->print();
3188       }
3189     }
3190 
3191     tty->cr();
3192     tty->print_cr("--- Basic Blocks ---");
3193     for (i = 0; i < block_count(); i++) {
3194       BlockBegin* block = block_at(i);
3195       tty->print("B%d [%d, %d, %d, %d] ", block->block_id(), block->first_lir_instruction_id(), block->last_lir_instruction_id(), block->loop_index(), block->loop_depth());
3196     }
3197     tty->cr();
3198     tty->cr();
3199   }
3200 
3201   if (PrintCFGToFile) {
3202     CFGPrinter::print_intervals(&_intervals, label);
3203   }
3204 }
3205 
3206 void LinearScan::print_lir(int level, const char* label, bool hir_valid) {
3207   if (TraceLinearScanLevel >= level) {
3208     tty->cr();
3209     tty->print_cr("%s", label);
3210     print_LIR(ir()->linear_scan_order());
3211     tty->cr();
3212   }
3213 
3214   if (level == 1 && PrintCFGToFile) {
3215     CFGPrinter::print_cfg(ir()->linear_scan_order(), label, hir_valid, true);
3216   }
3217 }
3218 
3219 #endif //PRODUCT
3220 
3221 
3222 // ********** verification functions for allocation
3223 // (check that all intervals have a correct register and that no registers are overwritten)
3224 #ifdef ASSERT
3225 
3226 void LinearScan::verify() {
3227   TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying intervals ******************************************"));
3228   verify_intervals();
3229 
3230   TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that no oops are in fixed intervals ****************"));
3231   verify_no_oops_in_fixed_intervals();
3232 
3233   TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that unpinned constants are not alive across block boundaries"));
3234   verify_constants();
3235 
3236   TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying register allocation ********************************"));
3237   verify_registers();
3238 
3239   TRACE_LINEAR_SCAN(2, tty->print_cr("********* no errors found **********************************************"));
3240 }
3241 
3242 void LinearScan::verify_intervals() {
3243   int len = interval_count();
3244   bool has_error = false;
3245 
3246   for (int i = 0; i < len; i++) {
3247     Interval* i1 = interval_at(i);
3248     if (i1 == NULL) continue;
3249 
3250     i1->check_split_children();
3251 
3252     if (i1->reg_num() != i) {
3253       tty->print_cr("Interval %d is on position %d in list", i1->reg_num(), i); i1->print(); tty->cr();
3254       has_error = true;
3255     }
3256 
3257     if (i1->reg_num() >= LIR_OprDesc::vreg_base && i1->type() == T_ILLEGAL) {
3258       tty->print_cr("Interval %d has no type assigned", i1->reg_num()); i1->print(); tty->cr();
3259       has_error = true;
3260     }
3261 
3262     if (i1->assigned_reg() == any_reg) {
3263       tty->print_cr("Interval %d has no register assigned", i1->reg_num()); i1->print(); tty->cr();
3264       has_error = true;
3265     }
3266 
3267     if (i1->assigned_reg() == i1->assigned_regHi()) {
3268       tty->print_cr("Interval %d: low and high register equal", i1->reg_num()); i1->print(); tty->cr();
3269       has_error = true;
3270     }
3271 
3272     if (!is_processed_reg_num(i1->assigned_reg())) {
3273       tty->print_cr("Can not have an Interval for an ignored register"); i1->print(); tty->cr();
3274       has_error = true;
3275     }
3276 
3277     // special intervals that are created in MoveResolver
3278     // -> ignore them because the range information has no meaning there
3279     if (i1->from() == 1 && i1->to() == 2) continue;
3280 
3281     if (i1->first() == Range::end()) {
3282       tty->print_cr("Interval %d has no Range", i1->reg_num()); i1->print(); tty->cr();
3283       has_error = true;
3284     }
3285 
3286     for (Range* r = i1->first(); r != Range::end(); r = r->next()) {
3287       if (r->from() >= r->to()) {
3288         tty->print_cr("Interval %d has zero length range", i1->reg_num()); i1->print(); tty->cr();
3289         has_error = true;
3290       }
3291     }
3292 
3293     for (int j = i + 1; j < len; j++) {
3294       Interval* i2 = interval_at(j);
3295       if (i2 == NULL || (i2->from() == 1 && i2->to() == 2)) continue;
3296 
3297       int r1 = i1->assigned_reg();
3298       int r1Hi = i1->assigned_regHi();
3299       int r2 = i2->assigned_reg();
3300       int r2Hi = i2->assigned_regHi();
3301       if ((r1 == r2 || r1 == r2Hi || (r1Hi != any_reg && (r1Hi == r2 || r1Hi == r2Hi))) && i1->intersects(i2)) {
3302         tty->print_cr("Intervals %d and %d overlap and have the same register assigned", i1->reg_num(), i2->reg_num());
3303         i1->print(); tty->cr();
3304         i2->print(); tty->cr();
3305         has_error = true;
3306       }
3307     }
3308   }
3309 
3310   assert(has_error == false, "register allocation invalid");
3311 }
3312 
3313 
3314 void LinearScan::verify_no_oops_in_fixed_intervals() {
3315   Interval* fixed_intervals;
3316   Interval* other_intervals;
3317   create_unhandled_lists(&fixed_intervals, &other_intervals, is_precolored_cpu_interval, NULL);
3318 
3319   // to ensure that the walk continues until the last instruction id, add a dummy interval
3320   // with a high operation id
3321   other_intervals = new Interval(any_reg);
3322   other_intervals->add_range(max_jint - 2, max_jint - 1);
3323   IntervalWalker* iw = new IntervalWalker(this, fixed_intervals, other_intervals);
3324 
3325   LIR_OpVisitState visitor;
3326   for (int i = 0; i < block_count(); i++) {
3327     BlockBegin* block = block_at(i);
3328 
3329     LIR_OpList* instructions = block->lir()->instructions_list();
3330 
3331     for (int j = 0; j < instructions->length(); j++) {
3332       LIR_Op* op = instructions->at(j);
3333       int op_id = op->id();
3334 
3335       visitor.visit(op);
3336 
3337       if (visitor.info_count() > 0) {
3338         iw->walk_before(op->id());
3339         bool check_live = true;
3340         if (op->code() == lir_move) {
3341           LIR_Op1* move = (LIR_Op1*)op;
3342           check_live = (move->patch_code() == lir_patch_none);
3343         }
3344         LIR_OpBranch* branch = op->as_OpBranch();
3345         if (branch != NULL && branch->stub() != NULL && branch->stub()->is_exception_throw_stub()) {
3346           // Don't bother checking the stub in this case since the
3347           // exception stub will never return to normal control flow.
3348           check_live = false;
3349         }
3350 
3351         // Make sure none of the fixed registers is live across an
3352         // oopmap since we can't handle that correctly.
3353         if (check_live) {
3354           for (Interval* interval = iw->active_first(fixedKind);
3355                interval != Interval::end();
3356                interval = interval->next()) {
3357             if (interval->current_to() > op->id() + 1) {
3358               // This interval is live out of this op so make sure
3359               // that this interval represents some value that's
3360               // referenced by this op either as an input or output.
3361               bool ok = false;
3362               for_each_visitor_mode(mode) {
3363                 int n = visitor.opr_count(mode);
3364                 for (int k = 0; k < n; k++) {
3365                   LIR_Opr opr = visitor.opr_at(mode, k);
3366                   if (opr->is_fixed_cpu()) {
3367                     if (interval_at(reg_num(opr)) == interval) {
3368                       ok = true;
3369                       break;
3370                     }
3371                     int hi = reg_numHi(opr);
3372                     if (hi != -1 && interval_at(hi) == interval) {
3373                       ok = true;
3374                       break;
3375                     }
3376                   }
3377                 }
3378               }
3379               assert(ok, "fixed intervals should never be live across an oopmap point");
3380             }
3381           }
3382         }
3383       }
3384 
3385       // oop-maps at calls do not contain registers, so the check is not needed
3386       if (!visitor.has_call()) {
3387 
3388         for_each_visitor_mode(mode) {
3389           int n = visitor.opr_count(mode);
3390           for (int k = 0; k < n; k++) {
3391             LIR_Opr opr = visitor.opr_at(mode, k);
3392 
3393             if (opr->is_fixed_cpu() && opr->is_oop()) {
3394               // operand is a non-virtual cpu register and contains an oop
3395               TRACE_LINEAR_SCAN(4, op->print_on(tty); tty->print("checking operand "); opr->print(); tty->cr());
3396 
3397               Interval* interval = interval_at(reg_num(opr));
3398               assert(interval != NULL, "no interval");
3399 
3400               if (mode == LIR_OpVisitState::inputMode) {
3401                 if (interval->to() >= op_id + 1) {
3402                   assert(interval->to() < op_id + 2 ||
3403                          interval->has_hole_between(op_id, op_id + 2),
3404                          "oop input operand live after instruction");
3405                 }
3406               } else if (mode == LIR_OpVisitState::outputMode) {
3407                 if (interval->from() <= op_id - 1) {
3408                   assert(interval->has_hole_between(op_id - 1, op_id),
3409                          "oop input operand live after instruction");
3410                 }
3411               }
3412             }
3413           }
3414         }
3415       }
3416     }
3417   }
3418 }
3419 
3420 
3421 void LinearScan::verify_constants() {
3422   int num_regs = num_virtual_regs();
3423   int size = live_set_size();
3424   int num_blocks = block_count();
3425 
3426   for (int i = 0; i < num_blocks; i++) {
3427     BlockBegin* block = block_at(i);
3428     ResourceBitMap live_at_edge = block->live_in();
3429 
3430     // visit all registers where the live_at_edge bit is set
3431     for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
3432       TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));
3433 
3434       Value value = gen()->instruction_for_vreg(r);
3435 
3436       assert(value != NULL, "all intervals live across block boundaries must have Value");
3437       assert(value->operand()->is_register() && value->operand()->is_virtual(), "value must have virtual operand");
3438       assert(value->operand()->vreg_number() == r, "register number must match");
3439       // TKR assert(value->as_Constant() == NULL || value->is_pinned(), "only pinned constants can be alive across block boundaries");
3440     }
3441   }
3442 }
3443 
3444 
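// RegisterVerifier performs a data-flow pass over the allocated LIR: for every block
// it records which interval each physical register currently holds and checks that
// every input operand reads the interval that was last written to its assigned register.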
3445 class RegisterVerifier: public StackObj {
3446  private:
3447   LinearScan*   _allocator;
3448   BlockList     _work_list;      // all blocks that must be processed
3449   IntervalsList _saved_states;   // saved information of previous check
3450 
3451   // simplified access to methods of LinearScan
3452   Compilation*  compilation() const              { return _allocator->compilation(); }
3453   Interval*     interval_at(int reg_num) const   { return _allocator->interval_at(reg_num); }
3454   int           reg_num(LIR_Opr opr) const       { return _allocator->reg_num(opr); }
3455 
3456   // currently, only registers are processed
3457   int           state_size()                     { return LinearScan::nof_regs; }
3458 
3459   // accessors
3460   IntervalList* state_for_block(BlockBegin* block) { return _saved_states.at(block->block_id()); }
3461   void          set_state_for_block(BlockBegin* block, IntervalList* saved_state) { _saved_states.at_put(block->block_id(), saved_state); }
3462   void          add_to_work_list(BlockBegin* block) { if (!_work_list.contains(block)) _work_list.append(block); }
3463 
3464   // helper functions
3465   IntervalList* copy(IntervalList* input_state);
3466   void          state_put(IntervalList* input_state, int reg, Interval* interval);
3467   bool          check_state(IntervalList* input_state, int reg, Interval* interval);
3468 
3469   void process_block(BlockBegin* block);
3470   void process_xhandler(XHandler* xhandler, IntervalList* input_state);
3471   void process_successor(BlockBegin* block, IntervalList* input_state);
3472   void process_operations(LIR_List* ops, IntervalList* input_state);
3473 
3474  public:
3475   RegisterVerifier(LinearScan* allocator)
3476     : _allocator(allocator)
3477     , _work_list(16)
3478     , _saved_states(BlockBegin::number_of_blocks(), BlockBegin::number_of_blocks(), NULL)
3479   { }
3480 
3481   void verify(BlockBegin* start);
3482 };
3483 
3484 
3485 // entry function from LinearScan that starts the verification
3486 void LinearScan::verify_registers() {
3487   RegisterVerifier verifier(this);
3488   verifier.verify(block_at(0));
3489 }
3490 
3491 
3492 void RegisterVerifier::verify(BlockBegin* start) {
3493   // setup input registers (method arguments) for first block
3494   int input_state_len = state_size();
3495   IntervalList* input_state = new IntervalList(input_state_len, input_state_len, NULL);
3496   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
3497   for (int n = 0; n < args->length(); n++) {
3498     LIR_Opr opr = args->at(n);
3499     if (opr->is_register()) {
3500       Interval* interval = interval_at(reg_num(opr));
3501 
3502       if (interval->assigned_reg() < state_size()) {
3503         input_state->at_put(interval->assigned_reg(), interval);
3504       }
3505       if (interval->assigned_regHi() != LinearScan::any_reg && interval->assigned_regHi() < state_size()) {
3506         input_state->at_put(interval->assigned_regHi(), interval);
3507       }
3508     }
3509   }
3510 
3511   set_state_for_block(start, input_state);
3512   add_to_work_list(start);
3513 
3514   // main loop for verification
3515   do {
3516     BlockBegin* block = _work_list.at(0);
3517     _work_list.remove_at(0);
3518 
3519     process_block(block);
3520   } while (!_work_list.is_empty());
3521 }
3522 
3523 void RegisterVerifier::process_block(BlockBegin* block) {
3524   TRACE_LINEAR_SCAN(2, tty->cr(); tty->print_cr("process_block B%d", block->block_id()));
3525 
3526   // must copy state because it is modified
3527   IntervalList* input_state = copy(state_for_block(block));
3528 
3529   if (TraceLinearScanLevel >= 4) {
3530     tty->print_cr("Input-State of intervals:");
3531     tty->print("    ");
3532     for (int i = 0; i < state_size(); i++) {
3533       if (input_state->at(i) != NULL) {
3534         tty->print(" %4d", input_state->at(i)->reg_num());
3535       } else {
3536         tty->print("   __");
3537       }
3538     }
3539     tty->cr();
3540     tty->cr();
3541   }
3542 
3543   // process all operations of the block
3544   process_operations(block->lir(), input_state);
3545 
3546   // iterate all successors
3547   for (int i = 0; i < block->number_of_sux(); i++) {
3548     process_successor(block->sux_at(i), input_state);
3549   }
3550 }
3551 
3552 void RegisterVerifier::process_xhandler(XHandler* xhandler, IntervalList* input_state) {
3553   TRACE_LINEAR_SCAN(2, tty->print_cr("process_xhandler B%d", xhandler->entry_block()->block_id()));
3554 
3555   // must copy state because it is modified
3556   input_state = copy(input_state);
3557 
3558   if (xhandler->entry_code() != NULL) {
3559     process_operations(xhandler->entry_code(), input_state);
3560   }
3561   process_successor(xhandler->entry_block(), input_state);
3562 }
3563 
3564 void RegisterVerifier::process_successor(BlockBegin* block, IntervalList* input_state) {
3565   IntervalList* saved_state = state_for_block(block);
3566 
3567   if (saved_state != NULL) {
3568     // this block was already processed before.
3569     // check if new input_state is consistent with saved_state
3570 
3571     bool saved_state_correct = true;
3572     for (int i = 0; i < state_size(); i++) {
3573       if (input_state->at(i) != saved_state->at(i)) {
3574         // current input_state and previous saved_state assume a different
3575         // interval in this register -> assume that this register is invalid
3576         if (saved_state->at(i) != NULL) {
3577           // invalidate the old calculation only if it assumed that
3578           // the register was valid. When the register was already invalid,
3579           // the old calculation was correct.
3580           saved_state_correct = false;
3581           saved_state->at_put(i, NULL);
3582 
3583           TRACE_LINEAR_SCAN(4, tty->print_cr("process_successor B%d: invalidating slot %d", block->block_id(), i));
3584         }
3585       }
3586     }
3587 
3588     if (saved_state_correct) {
3589       // already processed block with correct input_state
3590       TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: previous visit already correct", block->block_id()));
3591     } else {
3592       // must re-visit this block
3593       TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: must re-visit because input state changed", block->block_id()));
3594       add_to_work_list(block);
3595     }
3596 
3597   } else {
3598     // block was not processed before, so set initial input_state
3599     TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: initial visit", block->block_id()));
3600 
3601     set_state_for_block(block, copy(input_state));
3602     add_to_work_list(block);
3603   }
3604 }
3605 
3606 
3607 IntervalList* RegisterVerifier::copy(IntervalList* input_state) {
3608   IntervalList* copy_state = new IntervalList(input_state->length());
3609   copy_state->appendAll(input_state);
3610   return copy_state;
3611 }
3612 
3613 void RegisterVerifier::state_put(IntervalList* input_state, int reg, Interval* interval) {
3614   if (reg != LinearScan::any_reg && reg < state_size()) {
3615     if (interval != NULL) {
3616       TRACE_LINEAR_SCAN(4, tty->print_cr("        reg[%d] = %d", reg, interval->reg_num()));
3617     } else if (input_state->at(reg) != NULL) {
3618       TRACE_LINEAR_SCAN(4, tty->print_cr("        reg[%d] = NULL", reg));
3619     }
3620 
3621     input_state->at_put(reg, interval);
3622   }
3623 }
3624 
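// returns true if the register does not hold the expected interval, i.e. an allocation error was found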
3625 bool RegisterVerifier::check_state(IntervalList* input_state, int reg, Interval* interval) {
3626   if (reg != LinearScan::any_reg && reg < state_size()) {
3627     if (input_state->at(reg) != interval) {
3628       tty->print_cr("!! Error in register allocation: register %d does not contain interval %d", reg, interval->reg_num());
3629       return true;
3630     }
3631   }
3632   return false;
3633 }
3634 
3635 void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_state) {
3636   // visit all instructions of the block
3637   LIR_OpVisitState visitor;
3638   bool has_error = false;
3639 
3640   for (int i = 0; i < ops->length(); i++) {
3641     LIR_Op* op = ops->at(i);
3642     visitor.visit(op);
3643 
3644     TRACE_LINEAR_SCAN(4, op->print_on(tty));
3645 
3646     // check if input operands are correct
3647     int j;
3648     int n = visitor.opr_count(LIR_OpVisitState::inputMode);
3649     for (j = 0; j < n; j++) {
3650       LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, j);
3651       if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3652         Interval* interval = interval_at(reg_num(opr));
3653         if (op->id() != -1) {
3654           interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::inputMode);
3655         }
3656 
3657         has_error |= check_state(input_state, interval->assigned_reg(),   interval->split_parent());
3658         has_error |= check_state(input_state, interval->assigned_regHi(), interval->split_parent());
3659 
3660         // When an operand is marked with is_last_use, then the fpu stack allocator
3661         // removes the register from the fpu stack -> the register contains no value
3662         if (opr->is_last_use()) {
3663           state_put(input_state, interval->assigned_reg(),   NULL);
3664           state_put(input_state, interval->assigned_regHi(), NULL);
3665         }
3666       }
3667     }
3668 
3669     // invalidate all caller save registers at calls
3670     if (visitor.has_call()) {
3671       for (j = 0; j < FrameMap::nof_caller_save_cpu_regs(); j++) {
3672         state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
3673       }
3674       for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
3675         state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
3676       }
3677 
3678 #ifdef X86
3679       int num_caller_save_xmm_regs = FrameMap::get_num_caller_save_xmms();
3680       for (j = 0; j < num_caller_save_xmm_regs; j++) {
3681         state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
3682       }
3683 #endif
3684     }
3685 
3686     // process xhandler before output and temp operands
3687     XHandlers* xhandlers = visitor.all_xhandler();
3688     n = xhandlers->length();
3689     for (int k = 0; k < n; k++) {
3690       process_xhandler(xhandlers->handler_at(k), input_state);
3691     }
3692 
3693     // set temp operands (some operations use temp operands also as output operands, so can't set them NULL)
3694     n = visitor.opr_count(LIR_OpVisitState::tempMode);
3695     for (j = 0; j < n; j++) {
3696       LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, j);
3697       if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3698         Interval* interval = interval_at(reg_num(opr));
3699         if (op->id() != -1) {
3700           interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::tempMode);
3701         }
3702 
3703         state_put(input_state, interval->assigned_reg(),   interval->split_parent());
3704         state_put(input_state, interval->assigned_regHi(), interval->split_parent());
3705       }
3706     }
3707 
3708     // set output operands
3709     n = visitor.opr_count(LIR_OpVisitState::outputMode);
3710     for (j = 0; j < n; j++) {
3711       LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, j);
3712       if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3713         Interval* interval = interval_at(reg_num(opr));
3714         if (op->id() != -1) {
3715           interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::outputMode);
3716         }
3717 
3718         state_put(input_state, interval->assigned_reg(),   interval->split_parent());
3719         state_put(input_state, interval->assigned_regHi(), interval->split_parent());
3720       }
3721     }
3722   }
3723   assert(has_error == false, "Error in register allocation");
3724 }
3725 
3726 #endif // ASSERT
3727 
3728 
3729 
3730 // **** Implementation of MoveResolver ******************************
3731 
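// MoveResolver collects a set of parallel moves between intervals (or from constants
// to intervals), blocks the registers that still need to be read, and only processes
// a move whose destination is not blocked, so that no value is overwritten before it
// has been read.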
3732 MoveResolver::MoveResolver(LinearScan* allocator) :
3733   _allocator(allocator),
3734   _multiple_reads_allowed(false),
3735   _mapping_from(8),
3736   _mapping_from_opr(8),
3737   _mapping_to(8),
3738   _insert_list(NULL),
3739   _insert_idx(-1),
3740   _insertion_buffer()
3741 {
3742   for (int i = 0; i < LinearScan::nof_regs; i++) {
3743     _register_blocked[i] = 0;
3744   }
3745   DEBUG_ONLY(check_empty());
3746 }
3747 
3748 
3749 #ifdef ASSERT
3750 
3751 void MoveResolver::check_empty() {
3752   assert(_mapping_from.length() == 0 && _mapping_from_opr.length() == 0 && _mapping_to.length() == 0, "list must be empty before and after processing");
3753   for (int i = 0; i < LinearScan::nof_regs; i++) {
3754     assert(register_blocked(i) == 0, "register map must be empty before and after processing");
3755   }
3756   assert(_multiple_reads_allowed == false, "must have default value");
3757 }
3758 
3759 void MoveResolver::verify_before_resolve() {
3760   assert(_mapping_from.length() == _mapping_from_opr.length(), "length must be equal");
3761   assert(_mapping_from.length() == _mapping_to.length(), "length must be equal");
3762   assert(_insert_list != NULL && _insert_idx != -1, "insert position not set");
3763 
3764   int i, j;
3765   if (!_multiple_reads_allowed) {
3766     for (i = 0; i < _mapping_from.length(); i++) {
3767       for (j = i + 1; j < _mapping_from.length(); j++) {
3768         assert(_mapping_from.at(i) == NULL || _mapping_from.at(i) != _mapping_from.at(j), "cannot read from same interval twice");
3769       }
3770     }
3771   }
3772 
3773   for (i = 0; i < _mapping_to.length(); i++) {
3774     for (j = i + 1; j < _mapping_to.length(); j++) {
3775       assert(_mapping_to.at(i) != _mapping_to.at(j), "cannot write to same interval twice");
3776     }
3777   }
3778 
3779 
3780   ResourceBitMap used_regs(LinearScan::nof_regs + allocator()->frame_map()->argcount() + allocator()->max_spills());
3781   if (!_multiple_reads_allowed) {
3782     for (i = 0; i < _mapping_from.length(); i++) {
3783       Interval* it = _mapping_from.at(i);
3784       if (it != NULL) {
3785         assert(!used_regs.at(it->assigned_reg()), "cannot read from same register twice");
3786         used_regs.set_bit(it->assigned_reg());
3787 
3788         if (it->assigned_regHi() != LinearScan::any_reg) {
3789           assert(!used_regs.at(it->assigned_regHi()), "cannot read from same register twice");
3790           used_regs.set_bit(it->assigned_regHi());
3791         }
3792       }
3793     }
3794   }
3795 
3796   used_regs.clear();
3797   for (i = 0; i < _mapping_to.length(); i++) {
3798     Interval* it = _mapping_to.at(i);
3799     assert(!used_regs.at(it->assigned_reg()), "cannot write to same register twice");
3800     used_regs.set_bit(it->assigned_reg());
3801 
3802     if (it->assigned_regHi() != LinearScan::any_reg) {
3803       assert(!used_regs.at(it->assigned_regHi()), "cannot write to same register twice");
3804       used_regs.set_bit(it->assigned_regHi());
3805     }
3806   }
3807 
3808   used_regs.clear();
3809   for (i = 0; i < _mapping_from.length(); i++) {
3810     Interval* it = _mapping_from.at(i);
3811     if (it != NULL && it->assigned_reg() >= LinearScan::nof_regs) {
3812       used_regs.set_bit(it->assigned_reg());
3813     }
3814   }
3815   for (i = 0; i < _mapping_to.length(); i++) {
3816     Interval* it = _mapping_to.at(i);
3817     assert(!used_regs.at(it->assigned_reg()) || it->assigned_reg() == _mapping_from.at(i)->assigned_reg(), "stack slots used in _mapping_from must be disjoint to _mapping_to");
3818   }
3819 }
3820 
3821 #endif // ASSERT
3822 
3823 
3824 // mark assigned_reg and assigned_regHi of the interval as blocked
3825 void MoveResolver::block_registers(Interval* it) {
3826   int reg = it->assigned_reg();
3827   if (reg < LinearScan::nof_regs) {
3828     assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used");
3829     set_register_blocked(reg, 1);
3830   }
3831   reg = it->assigned_regHi();
3832   if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
3833     assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used");
3834     set_register_blocked(reg, 1);
3835   }
3836 }
3837 
3838 // mark assigned_reg and assigned_regHi of the interval as unblocked
3839 void MoveResolver::unblock_registers(Interval* it) {
3840   int reg = it->assigned_reg();
3841   if (reg < LinearScan::nof_regs) {
3842     assert(register_blocked(reg) > 0, "register already marked as unused");
3843     set_register_blocked(reg, -1);
3844   }
3845   reg = it->assigned_regHi();
3846   if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
3847     assert(register_blocked(reg) > 0, "register already marked as unused");
3848     set_register_blocked(reg, -1);
3849   }
3850 }
3851 
3852 // check if assigned_reg and assigned_regHi of the to-interval are not blocked (or only blocked by from)
3853 bool MoveResolver::save_to_process_move(Interval* from, Interval* to) {
3854   int from_reg = -1;
3855   int from_regHi = -1;
3856   if (from != NULL) {
3857     from_reg = from->assigned_reg();
3858     from_regHi = from->assigned_regHi();
3859   }
3860 
3861   int reg = to->assigned_reg();
3862   if (reg < LinearScan::nof_regs) {
3863     if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
3864       return false;
3865     }
3866   }
3867   reg = to->assigned_regHi();
3868   if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
3869     if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
3870       return false;
3871     }
3872   }
3873 
3874   return true;
3875 }
3876 
3877 
3878 void MoveResolver::create_insertion_buffer(LIR_List* list) {
3879   assert(!_insertion_buffer.initialized(), "overwriting existing buffer");
3880   _insertion_buffer.init(list);
3881 }
3882 
3883 void MoveResolver::append_insertion_buffer() {
3884   if (_insertion_buffer.initialized()) {
3885     _insertion_buffer.lir_list()->append(&_insertion_buffer);
3886   }
3887   assert(!_insertion_buffer.initialized(), "must be uninitialized now");
3888 
3889   _insert_list = NULL;
3890   _insert_idx = -1;
3891 }
3892 
3893 void MoveResolver::insert_move(Interval* from_interval, Interval* to_interval) {
3894   assert(from_interval->reg_num() != to_interval->reg_num(), "from and to interval equal");
3895   assert(from_interval->type() == to_interval->type(), "move between different types");
3896   assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
3897   assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
3898 
3899   LIR_Opr from_opr = get_virtual_register(from_interval);
3900   LIR_Opr to_opr = get_virtual_register(to_interval);
3901 
3902   if (!_multiple_reads_allowed) {
3903     // the last_use flag is an optimization for FPU stack allocation. When the same
3904     // input interval is used in more than one move, it is too difficult to determine
3905     // if this move is really the last use.
3906     from_opr = from_opr->make_last_use();
3907   }
3908   _insertion_buffer.move(_insert_idx, from_opr, to_opr);
3909 
3910   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: inserted move from register %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
3911 }
3912 
3913 void MoveResolver::insert_move(LIR_Opr from_opr, Interval* to_interval) {
3914   assert(from_opr->type() == to_interval->type(), "move between different types");
3915   assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
3916   assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
3917 
3918   LIR_Opr to_opr = get_virtual_register(to_interval);
3919   _insertion_buffer.move(_insert_idx, from_opr, to_opr);
3920 
3921   TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: inserted move from constant "); from_opr->print(); tty->print_cr("  to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
3922 }
3923 
3924 LIR_Opr MoveResolver::get_virtual_register(Interval* interval) {
3925   // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
3926   // a few extra registers before we really run out, which helps to avoid tripping over assertions.
3927   int reg_num = interval->reg_num();
3928   if (reg_num + 20 >= LIR_OprDesc::vreg_max) {
3929     _allocator->bailout("out of virtual registers in linear scan");
3930     if (reg_num + 2 >= LIR_OprDesc::vreg_max) {
3931       // Wrap it around and continue until bailout really happens to avoid hitting assertions.
3932       reg_num = LIR_OprDesc::vreg_base;
3933     }
3934   }
3935   LIR_Opr vreg = LIR_OprFact::virtual_register(reg_num, interval->type());
3936   assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
3937   return vreg;
3938 }
3939 
3940 void MoveResolver::resolve_mappings() {
3941   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: resolving mappings for Block B%d, index %d", _insert_list->block() != NULL ? _insert_list->block()->block_id() : -1, _insert_idx));
3942   DEBUG_ONLY(verify_before_resolve());
3943 
3944   // Block all registers that are used as input operands of a move.
3945   // When a register is blocked, no move to this register is emitted.
3946   // This is necessary for detecting cycles in moves.
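  // For example: with the mappings r1 -> r2 and r2 -> r1, both r1 and r2 are blocked as move
  // inputs. Neither move then finds a free target (save_to_process_move() returns false for
  // both), so no interval is processed in the loop below and the cycle is broken by spilling
  // one of the source intervals to a stack slot.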
3947   int i;
3948   for (i = _mapping_from.length() - 1; i >= 0; i--) {
3949     Interval* from_interval = _mapping_from.at(i);
3950     if (from_interval != NULL) {
3951       block_registers(from_interval);
3952     }
3953   }
3954 
3955   int spill_candidate = -1;
3956   while (_mapping_from.length() > 0) {
3957     bool processed_interval = false;
3958 
3959     for (i = _mapping_from.length() - 1; i >= 0; i--) {
3960       Interval* from_interval = _mapping_from.at(i);
3961       Interval* to_interval = _mapping_to.at(i);
3962 
3963       if (save_to_process_move(from_interval, to_interval)) {
3964         // this interval can be processed because its target is free
3965         if (from_interval != NULL) {
3966           insert_move(from_interval, to_interval);
3967           unblock_registers(from_interval);
3968         } else {
3969           insert_move(_mapping_from_opr.at(i), to_interval);
3970         }
3971         _mapping_from.remove_at(i);
3972         _mapping_from_opr.remove_at(i);
3973         _mapping_to.remove_at(i);
3974 
3975         processed_interval = true;
3976       } else if (from_interval != NULL && from_interval->assigned_reg() < LinearScan::nof_regs) {
3977         // this interval cannot be processed now because its target is not free;
3978         // it starts in a register, so it is a possible candidate for spilling
3979         spill_candidate = i;
3980       }
3981     }
3982 
3983     if (!processed_interval) {
3984       // no move could be processed because there is a cycle in the move list
3985       // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory
3986       guarantee(spill_candidate != -1, "no interval in register for spilling found");
3987 
3988       // create a new spill interval and assign a stack slot to it
3989       Interval* from_interval = _mapping_from.at(spill_candidate);
3990       Interval* spill_interval = new Interval(-1);
3991       spill_interval->set_type(from_interval->type());
3992 
3993       // add a dummy range because real position is difficult to calculate
3994       // Note: this range is a special case when the integrity of the allocation is checked
3995       spill_interval->add_range(1, 2);
3996 
3997       // do not allocate a new spill slot for the temporary interval, but
3998       // use the spill slot assigned to from_interval. Otherwise moves from
3999       // one stack slot to another can happen (not allowed by LIR_Assembler)
4000       int spill_slot = from_interval->canonical_spill_slot();
4001       if (spill_slot < 0) {
4002         spill_slot = allocator()->allocate_spill_slot(type2spill_size[spill_interval->type()] == 2);
4003         from_interval->set_canonical_spill_slot(spill_slot);
4004       }
4005       spill_interval->assign_reg(spill_slot);
4006       allocator()->append_interval(spill_interval);
4007 
4008       TRACE_LINEAR_SCAN(4, tty->print_cr("created new Interval %d for spilling", spill_interval->reg_num()));
4009 
4010       // insert a move from register to stack and update the mapping
4011       insert_move(from_interval, spill_interval);
4012       _mapping_from.at_put(spill_candidate, spill_interval);
4013       unblock_registers(from_interval);
4014     }
4015   }
4016 
4017   // reset to default value
4018   _multiple_reads_allowed = false;
4019 
4020   // check that all intervals have been processed
4021   DEBUG_ONLY(check_empty());
4022 }
4023 
4024 
4025 void MoveResolver::set_insert_position(LIR_List* insert_list, int insert_idx) {
4026   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: setting insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));
4027   assert(_insert_list == NULL && _insert_idx == -1, "use move_insert_position instead of set_insert_position when data already set");
4028 
4029   create_insertion_buffer(insert_list);
4030   _insert_list = insert_list;
4031   _insert_idx = insert_idx;
4032 }
4033 
4034 void MoveResolver::move_insert_position(LIR_List* insert_list, int insert_idx) {
4035   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: moving insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));
4036 
4037   if (_insert_list != NULL && (insert_list != _insert_list || insert_idx != _insert_idx)) {
4038     // insert position changed -> resolve current mappings
4039     resolve_mappings();
4040   }
4041 
4042   if (insert_list != _insert_list) {
4043     // block changed -> append the insertion_buffer (it is bound to a
4044     // specific block) and create a new insertion_buffer for the new block
4045     append_insertion_buffer();
4046     create_insertion_buffer(insert_list);
4047   }
4048 
4049   _insert_list = insert_list;
4050   _insert_idx = insert_idx;
4051 }
4052 
4053 void MoveResolver::add_mapping(Interval* from_interval, Interval* to_interval) {
4054   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: adding mapping from %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
4055 
4056   _mapping_from.append(from_interval);
4057   _mapping_from_opr.append(LIR_OprFact::illegalOpr);
4058   _mapping_to.append(to_interval);
4059 }
4060 
4061 
4062 void MoveResolver::add_mapping(LIR_Opr from_opr, Interval* to_interval) {
4063   TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: adding mapping from "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
4064   assert(from_opr->is_constant(), "only for constants");
4065 
4066   _mapping_from.append(NULL);
4067   _mapping_from_opr.append(from_opr);
4068   _mapping_to.append(to_interval);
4069 }
4070 
4071 void MoveResolver::resolve_and_append_moves() {
4072   if (has_mappings()) {
4073     resolve_mappings();
4074   }
4075   append_insertion_buffer();
4076 }
4077 
4078 
4079 
4080 // **** Implementation of Range *************************************
4081 
4082 Range::Range(int from, int to, Range* next) :
4083   _from(from),
4084   _to(to),
4085   _next(next)
4086 {
4087 }
4088 
4089 // initialize sentinel
4090 Range* Range::_end = NULL;
4091 void Range::initialize(Arena* arena) {
4092   _end = new (arena) Range(max_jint, max_jint, NULL);
4093 }
4094 
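// returns the first position where the two range lists overlap, or -1 if they are disjoint;
// e.g. for r1 = [10, 20[ -> [30, 40[ and r2 = [18, 32[ the result is 18, the start of the overlap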
4095 int Range::intersects_at(Range* r2) const {
4096   const Range* r1 = this;
4097 
4098   assert(r1 != NULL && r2 != NULL, "null ranges not allowed");
4099   assert(r1 != _end && r2 != _end, "empty ranges not allowed");
4100 
4101   do {
4102     if (r1->from() < r2->from()) {
4103       if (r1->to() <= r2->from()) {
4104         r1 = r1->next(); if (r1 == _end) return -1;
4105       } else {
4106         return r2->from();
4107       }
4108     } else if (r2->from() < r1->from()) {
4109       if (r2->to() <= r1->from()) {
4110         r2 = r2->next(); if (r2 == _end) return -1;
4111       } else {
4112         return r1->from();
4113       }
4114     } else { // r1->from() == r2->from()
4115       if (r1->from() == r1->to()) {
4116         r1 = r1->next(); if (r1 == _end) return -1;
4117       } else if (r2->from() == r2->to()) {
4118         r2 = r2->next(); if (r2 == _end) return -1;
4119       } else {
4120         return r1->from();
4121       }
4122     }
4123   } while (true);
4124 }
4125 
4126 #ifndef PRODUCT
4127 void Range::print(outputStream* out) const {
4128   out->print("[%d, %d[ ", _from, _to);
4129 }
4130 #endif
4131 
4132 
4133 
4134 // **** Implementation of Interval **********************************
4135 
4136 // initialize sentinel
4137 Interval* Interval::_end = NULL;
4138 void Interval::initialize(Arena* arena) {
4139   Range::initialize(arena);
4140   _end = new (arena) Interval(-1);
4141 }
4142 
4143 Interval::Interval(int reg_num) :
4144   _reg_num(reg_num),
4145   _type(T_ILLEGAL),
4146   _first(Range::end()),
4147   _use_pos_and_kinds(12),
4148   _current(Range::end()),
4149   _next(_end),
4150   _state(invalidState),
4151   _assigned_reg(LinearScan::any_reg),
4152   _assigned_regHi(LinearScan::any_reg),
4153   _cached_to(-1),
4154   _cached_opr(LIR_OprFact::illegalOpr),
4155   _cached_vm_reg(VMRegImpl::Bad()),
4156   _split_children(NULL),
4157   _canonical_spill_slot(-1),
4158   _insert_move_when_activated(false),
4159   _register_hint(NULL),
4160   _spill_state(noDefinitionFound),
4161   _spill_definition_pos(-1)
4162 {
4163   _split_parent = this;
4164   _current_split_child = this;
4165 }
4166 
4167 int Interval::calc_to() {
4168   assert(_first != Range::end(), "interval has no range");
4169 
4170   Range* r = _first;
4171   while (r->next() != Range::end()) {
4172     r = r->next();
4173   }
4174   return r->to();
4175 }
4176 
4177 
4178 #ifdef ASSERT
4179 // consistency check of split-children
4180 void Interval::check_split_children() {
4181   if (_split_children != NULL && _split_children->length() > 0) {
4182     assert(is_split_parent(), "only split parents can have children");
4183 
4184     for (int i = 0; i < _split_children->length(); i++) {
4185       Interval* i1 = _split_children->at(i);
4186 
4187       assert(i1->split_parent() == this, "not a split child of this interval");
4188       assert(i1->type() == type(), "must be equal for all split children");
4189       assert(i1->canonical_spill_slot() == canonical_spill_slot(), "must be equal for all split children");
4190 
4191       for (int j = i + 1; j < _split_children->length(); j++) {
4192         Interval* i2 = _split_children->at(j);
4193 
4194         assert(i1->reg_num() != i2->reg_num(), "same register number");
4195 
4196         if (i1->from() < i2->from()) {
4197           assert(i1->to() <= i2->from() && i1->to() < i2->to(), "intervals overlapping");
4198         } else {
4199           assert(i2->from() < i1->from(), "intervals start at same op_id");
4200           assert(i2->to() <= i1->from() && i2->to() < i1->to(), "intervals overlapping");
4201         }
4202       }
4203     }
4204   }
4205 }
4206 #endif // ASSERT
4207 
4208 Interval* Interval::register_hint(bool search_split_child) const {
4209   if (!search_split_child) {
4210     return _register_hint;
4211   }
4212 
4213   if (_register_hint != NULL) {
4214     assert(_register_hint->is_split_parent(), "only split parents are valid hint registers");
4215 
4216     if (_register_hint->assigned_reg() >= 0 && _register_hint->assigned_reg() < LinearScan::nof_regs) {
4217       return _register_hint;
4218 
4219     } else if (_register_hint->_split_children != NULL && _register_hint->_split_children->length() > 0) {
4220       // search the first split child that has a register assigned
4221       int len = _register_hint->_split_children->length();
4222       for (int i = 0; i < len; i++) {
4223         Interval* cur = _register_hint->_split_children->at(i);
4224 
4225         if (cur->assigned_reg() >= 0 && cur->assigned_reg() < LinearScan::nof_regs) {
4226           return cur;
4227         }
4228       }
4229     }
4230   }
4231 
4232   // no hint interval found that has a register assigned
4233   return NULL;
4234 }
4235 
4236 
4237 Interval* Interval::split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode) {
4238   assert(is_split_parent(), "can only be called for split parents");
4239   assert(op_id >= 0, "invalid op_id (method can not be called for spill moves)");
4240 
4241   Interval* result;
4242   if (_split_children == NULL || _split_children->length() == 0) {
4243     result = this;
4244   } else {
4245     result = NULL;
4246     int len = _split_children->length();
4247 
4248     // in outputMode, the end of the interval (op_id == cur->to()) is not valid
4249     int to_offset = (mode == LIR_OpVisitState::outputMode ? 0 : 1);
4250 
4251     int i;
4252     for (i = 0; i < len; i++) {
4253       Interval* cur = _split_children->at(i);
4254       if (cur->from() <= op_id && op_id < cur->to() + to_offset) {
4255         if (i > 0) {
4256           // exchange current split child to start of list (faster access for next call)
4257           _split_children->at_put(i, _split_children->at(0));
4258           _split_children->at_put(0, cur);
4259         }
4260 
4261         // interval found
4262         result = cur;
4263         break;
4264       }
4265     }
4266 
4267 #ifdef ASSERT
4268     for (i = 0; i < len; i++) {
4269       Interval* tmp = _split_children->at(i);
4270       if (tmp != result && tmp->from() <= op_id && op_id < tmp->to() + to_offset) {
4271         tty->print_cr("two valid result intervals found for op_id %d: %d and %d", op_id, result->reg_num(), tmp->reg_num());
4272         result->print();
4273         tmp->print();
4274         assert(false, "two valid result intervals found");
4275       }
4276     }
4277 #endif
4278   }
4279 
4280   assert(result != NULL, "no matching interval found");
4281   assert(result->covers(op_id, mode), "op_id not covered by interval");
4282 
4283   return result;
4284 }
4285 
4286 
4287 // returns the last split child that ends before the given op_id
4288 Interval* Interval::split_child_before_op_id(int op_id) {
4289   assert(op_id >= 0, "invalid op_id");
4290 
4291   Interval* parent = split_parent();
4292   Interval* result = NULL;
4293 
4294   assert(parent->_split_children != NULL, "no split children available");
4295   int len = parent->_split_children->length();
4296   assert(len > 0, "no split children available");
4297 
4298   for (int i = len - 1; i >= 0; i--) {
4299     Interval* cur = parent->_split_children->at(i);
4300     if (cur->to() <= op_id && (result == NULL || result->to() < cur->to())) {
4301       result = cur;
4302     }
4303   }
4304 
4305   assert(result != NULL, "no split child found");
4306   return result;
4307 }
4308 
4309 
4310 // Note: use positions are sorted descending -> first use has highest index
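// For example, uses at positions 8 (mustHaveRegister), 20 (shouldHaveRegister) and 42 (mustHaveRegister)
// are appended with descending positions, so _use_pos_and_kinds holds the flat (position, kind) pairs
// [42, mustHaveRegister, 20, shouldHaveRegister, 8, mustHaveRegister]; the first use (position 8)
// therefore sits at the highest index.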
4311 int Interval::first_usage(IntervalUseKind min_use_kind) const {
4312   assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
4313 
4314   for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4315     if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
4316       return _use_pos_and_kinds.at(i);
4317     }
4318   }
4319   return max_jint;
4320 }
4321 
4322 int Interval::next_usage(IntervalUseKind min_use_kind, int from) const {
4323   assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
4324 
4325   for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4326     if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) >= min_use_kind) {
4327       return _use_pos_and_kinds.at(i);
4328     }
4329   }
4330   return max_jint;
4331 }
4332 
4333 int Interval::next_usage_exact(IntervalUseKind exact_use_kind, int from) const {
4334   assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
4335 
4336   for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4337     if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) == exact_use_kind) {
4338       return _use_pos_and_kinds.at(i);
4339     }
4340   }
4341   return max_jint;
4342 }
4343 
4344 int Interval::previous_usage(IntervalUseKind min_use_kind, int from) const {
4345   assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
4346 
4347   int prev = 0;
4348   for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4349     if (_use_pos_and_kinds.at(i) > from) {
4350       return prev;
4351     }
4352     if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
4353       prev = _use_pos_and_kinds.at(i);
4354     }
4355   }
4356   return prev;
4357 }
4358 
4359 void Interval::add_use_pos(int pos, IntervalUseKind use_kind) {
4360   assert(covers(pos, LIR_OpVisitState::inputMode), "use position not covered by live range");
4361 
4362   // do not add use positions for precolored intervals because
4363   // they are never used
4364   if (use_kind != noUse && reg_num() >= LIR_OprDesc::vreg_base) {
4365 #ifdef ASSERT
4366     assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
4367     for (int i = 0; i < _use_pos_and_kinds.length(); i += 2) {
4368       assert(pos <= _use_pos_and_kinds.at(i), "already added a use-position with lower position");
4369       assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4370       if (i > 0) {
4371         assert(_use_pos_and_kinds.at(i) < _use_pos_and_kinds.at(i - 2), "not sorted descending");
4372       }
4373     }
4374 #endif
4375 
4376     // Note: add_use_pos is called with descending positions, so the list gets sorted
4377     //       automatically by just appending new use positions
4378     int len = _use_pos_and_kinds.length();
4379     if (len == 0 || _use_pos_and_kinds.at(len - 2) > pos) {
4380       _use_pos_and_kinds.append(pos);
4381       _use_pos_and_kinds.append(use_kind);
4382     } else if (_use_pos_and_kinds.at(len - 1) < use_kind) {
4383       assert(_use_pos_and_kinds.at(len - 2) == pos, "list not sorted correctly");
4384       _use_pos_and_kinds.at_put(len - 1, use_kind);
4385     }
4386   }
4387 }
4388 
4389 void Interval::add_range(int from, int to) {
4390   assert(from < to, "invalid range");
4391   assert(first() == Range::end() || to < first()->next()->from(), "not inserting at begin of interval");
4392   assert(from <= first()->to(), "not inserting at begin of interval");
4393 
4394   if (first()->from() <= to) {
4395     // join intersecting ranges
4396     first()->set_from(MIN2(from, first()->from()));
4397     first()->set_to  (MAX2(to,   first()->to()));
4398   } else {
4399     // insert new range
4400     _first = new Range(from, to, first());
4401   }
4402 }
4403 
4404 Interval* Interval::new_split_child() {
4405   // allocate new interval
4406   Interval* result = new Interval(-1);
4407   result->set_type(type());
4408 
4409   Interval* parent = split_parent();
4410   result->_split_parent = parent;
4411   result->set_register_hint(parent);
4412 
4413   // insert new interval in children-list of parent
4414   if (parent->_split_children == NULL) {
4415     assert(is_split_parent(), "list must be initialized at first split");
4416 
4417     parent->_split_children = new IntervalList(4);
4418     parent->_split_children->append(this);
4419   }
4420   parent->_split_children->append(result);
4421 
4422   return result;
4423 }
4424 
4425 // split this interval at the specified position and return
4426 // the remainder as a new interval.
4427 //
4428 // when an interval is split, a bi-directional link is established between the original interval
4429 // (the split parent) and the intervals that are split off this interval (the split children)
4430 // When a split child is split again, the newly created interval is also a direct child
4431 // of the original parent (there is no tree of split children stored, but a flat list)
4432 // All split children are spilled to the same stack slot (stored in _canonical_spill_slot)
4433 //
4434 // Note: The new interval has no valid reg_num
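// Example: an interval with the single range [16, 40[ split at position 24 keeps [16, 24[
// (and all use positions below 24), while the returned split child receives [24, 40[ and
// all use positions at or above 24.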
4435 Interval* Interval::split(int split_pos) {
4436   assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");
4437 
4438   // allocate new interval
4439   Interval* result = new_split_child();
4440 
4441   // split the ranges
4442   Range* prev = NULL;
4443   Range* cur = _first;
4444   while (cur != Range::end() && cur->to() <= split_pos) {
4445     prev = cur;
4446     cur = cur->next();
4447   }
4448   assert(cur != Range::end(), "split interval after end of last range");
4449 
4450   if (cur->from() < split_pos) {
4451     result->_first = new Range(split_pos, cur->to(), cur->next());
4452     cur->set_to(split_pos);
4453     cur->set_next(Range::end());
4454 
4455   } else {
4456     assert(prev != NULL, "split before start of first range");
4457     result->_first = cur;
4458     prev->set_next(Range::end());
4459   }
4460   result->_current = result->_first;
4461   _cached_to = -1; // clear cached value
4462 
4463   // split list of use positions
4464   int total_len = _use_pos_and_kinds.length();
4465   int start_idx = total_len - 2;
4466   while (start_idx >= 0 && _use_pos_and_kinds.at(start_idx) < split_pos) {
4467     start_idx -= 2;
4468   }
4469 
4470   intStack new_use_pos_and_kinds(total_len - start_idx);
4471   int i;
4472   for (i = start_idx + 2; i < total_len; i++) {
4473     new_use_pos_and_kinds.append(_use_pos_and_kinds.at(i));
4474   }
4475 
4476   _use_pos_and_kinds.trunc_to(start_idx + 2);
4477   result->_use_pos_and_kinds = _use_pos_and_kinds;
4478   _use_pos_and_kinds = new_use_pos_and_kinds;
4479 
4480 #ifdef ASSERT
4481   assert(_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
4482   assert(result->_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
4483   assert(_use_pos_and_kinds.length() + result->_use_pos_and_kinds.length() == total_len, "missed some entries");
4484 
4485   for (i = 0; i < _use_pos_and_kinds.length(); i += 2) {
4486     assert(_use_pos_and_kinds.at(i) < split_pos, "must be");
4487     assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4488   }
4489   for (i = 0; i < result->_use_pos_and_kinds.length(); i += 2) {
4490     assert(result->_use_pos_and_kinds.at(i) >= split_pos, "must be");
4491     assert(result->_use_pos_and_kinds.at(i + 1) >= firstValidKind && result->_use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4492   }
4493 #endif
4494 
4495   return result;
4496 }
4497 
4498 // split this interval at the specified position and return
4499 // the head as a new interval (the original interval is the tail)
4500 //
4501 // Currently, only the first range can be split, and the new interval
4502 // must not contain any use positions
4503 Interval* Interval::split_from_start(int split_pos) {
4504   assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");
4505   assert(split_pos > from() && split_pos < to(), "can only split inside interval");
4506   assert(split_pos > _first->from() && split_pos <= _first->to(), "can only split inside first range");
4507   assert(first_usage(noUse) > split_pos, "can not split when use positions are present");
4508 
4509   // allocate new interval
4510   Interval* result = new_split_child();
4511 
4512   // the newly created interval has only one range (checked by assertion above),
4513   // so the splitting of the ranges is very simple
4514   result->add_range(_first->from(), split_pos);
4515 
4516   if (split_pos == _first->to()) {
4517     assert(_first->next() != Range::end(), "must not be at end");
4518     _first = _first->next();
4519   } else {
4520     _first->set_from(split_pos);
4521   }
4522 
4523   return result;
4524 }
4525 
4526 
4527 // returns true if the op_id is inside the interval
4528 bool Interval::covers(int op_id, LIR_OpVisitState::OprMode mode) const {
4529   Range* cur  = _first;
4530 
4531   while (cur != Range::end() && cur->to() < op_id) {
4532     cur = cur->next();
4533   }
4534   if (cur != Range::end()) {
4535     assert(cur->to() != cur->next()->from(), "ranges not separated");
4536 
4537     if (mode == LIR_OpVisitState::outputMode) {
4538       return cur->from() <= op_id && op_id < cur->to();
4539     } else {
4540       return cur->from() <= op_id && op_id <= cur->to();
4541     }
4542   }
4543   return false;
4544 }
4545 
4546 // returns true if the interval has any hole between hole_from and hole_to
4547 // (even if the hole has only the length 1)
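// e.g. for an interval with the ranges [10, 20[ and [24, 30[, has_hole_between(20, 24) and
// has_hole_between(18, 26) return true, while has_hole_between(12, 18) returns false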
4548 bool Interval::has_hole_between(int hole_from, int hole_to) {
4549   assert(hole_from < hole_to, "check");
4550   assert(from() <= hole_from && hole_to <= to(), "index out of interval");
4551 
4552   Range* cur  = _first;
4553   while (cur != Range::end()) {
4554     assert(cur->to() < cur->next()->from(), "no space between ranges");
4555 
4556     // hole-range starts before this range -> hole
4557     if (hole_from < cur->from()) {
4558       return true;
4559 
4560     // hole-range completely inside this range -> no hole
4561     } else if (hole_to <= cur->to()) {
4562       return false;
4563 
4564     // overlapping of hole-range with this range -> hole
4565     } else if (hole_from <= cur->to()) {
4566       return true;
4567     }
4568 
4569     cur = cur->next();
4570   }
4571 
4572   return false;
4573 }
4574 
4575 // Check if there is an intersection with any of the split children of 'interval'
4576 bool Interval::intersects_any_children_of(Interval* interval) const {
4577   if (interval->_split_children != NULL) {
4578     for (int i = 0; i < interval->_split_children->length(); i++) {
4579       if (intersects(interval->_split_children->at(i))) {
4580         return true;
4581       }
4582     }
4583   }
4584   return false;
4585 }
4586 
4587 
4588 #ifndef PRODUCT
4589 void Interval::print(outputStream* out) const {
4590   const char* SpillState2Name[] = { "no definition", "no spill store", "one spill store", "store at definition", "start in memory", "no optimization" };
4591   const char* UseKind2Name[] = { "N", "L", "S", "M" };
4592 
4593   const char* type_name;
4594   LIR_Opr opr = LIR_OprFact::illegal();
4595   if (reg_num() < LIR_OprDesc::vreg_base) {
4596     type_name = "fixed";
4597     // need a temporary operand for fixed intervals because type() cannot be called
4598 #ifdef X86
4599     int last_xmm_reg = pd_last_xmm_reg;
4600 #ifdef _LP64
4601     if (UseAVX < 3) {
4602       last_xmm_reg = pd_first_xmm_reg + (pd_nof_xmm_regs_frame_map / 2) - 1;
4603     }
4604 #endif
4605 #endif
4606     if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
4607       opr = LIR_OprFact::single_cpu(assigned_reg());
4608     } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
4609       opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
4610 #ifdef X86
4611     } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= last_xmm_reg) {
4612       opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
4613 #endif
4614     } else {
4615       ShouldNotReachHere();
4616     }
4617   } else {
4618     type_name = type2name(type());
4619     if (assigned_reg() != -1 &&
4620         (LinearScan::num_physical_regs(type()) == 1 || assigned_regHi() != -1)) {
4621       opr = LinearScan::calc_operand_for_interval(this);
4622     }
4623   }
4624 
4625   out->print("%d %s ", reg_num(), type_name);
4626   if (opr->is_valid()) {
4627     out->print("\"");
4628     opr->print(out);
4629     out->print("\" ");
4630   }
4631   out->print("%d %d ", split_parent()->reg_num(), (register_hint(false) != NULL ? register_hint(false)->reg_num() : -1));
4632 
4633   // print ranges
4634   Range* cur = _first;
4635   while (cur != Range::end()) {
4636     cur->print(out);
4637     cur = cur->next();
4638     assert(cur != NULL, "range list not closed with range sentinel");
4639   }
4640 
4641   // print use positions
4642   int prev = 0;
4643   assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
4644   for (int i =_use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
4645     assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4646     assert(prev < _use_pos_and_kinds.at(i), "use positions not sorted");
4647 
4648     out->print("%d %s ", _use_pos_and_kinds.at(i), UseKind2Name[_use_pos_and_kinds.at(i + 1)]);
4649     prev = _use_pos_and_kinds.at(i);
4650   }
4651 
4652   out->print(" \"%s\"", SpillState2Name[spill_state()]);
4653   out->cr();
4654 }
4655 #endif
4656 
4657 
4658 
4659 // **** Implementation of IntervalWalker ****************************
4660 
4661 IntervalWalker::IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
4662  : _compilation(allocator->compilation())
4663  , _allocator(allocator)
4664 {
4665   _unhandled_first[fixedKind] = unhandled_fixed_first;
4666   _unhandled_first[anyKind]   = unhandled_any_first;
4667   _active_first[fixedKind]    = Interval::end();
4668   _inactive_first[fixedKind]  = Interval::end();
4669   _active_first[anyKind]      = Interval::end();
4670   _inactive_first[anyKind]    = Interval::end();
4671   _current_position = -1;
4672   _current = NULL;
4673   next_interval();
4674 }
4675 
4676 
4677 // append interval in order of current range from()
4678 void IntervalWalker::append_sorted(Interval** list, Interval* interval) {
4679   Interval* prev = NULL;
4680   Interval* cur  = *list;
4681   while (cur->current_from() < interval->current_from()) {
4682     prev = cur; cur = cur->next();
4683   }
4684   if (prev == NULL) {
4685     *list = interval;
4686   } else {
4687     prev->set_next(interval);
4688   }
4689   interval->set_next(cur);
4690 }
4691 
4692 void IntervalWalker::append_to_unhandled(Interval** list, Interval* interval) {
4693   assert(interval->from() >= current()->current_from(), "cannot append new interval before current walk position");
4694 
4695   Interval* prev = NULL;
4696   Interval* cur  = *list;
4697   while (cur->from() < interval->from() || (cur->from() == interval->from() && cur->first_usage(noUse) < interval->first_usage(noUse))) {
4698     prev = cur; cur = cur->next();
4699   }
4700   if (prev == NULL) {
4701     *list = interval;
4702   } else {
4703     prev->set_next(interval);
4704   }
4705   interval->set_next(cur);
4706 }
4707 
4708 
4709 inline bool IntervalWalker::remove_from_list(Interval** list, Interval* i) {
4710   while (*list != Interval::end() && *list != i) {
4711     list = (*list)->next_addr();
4712   }
4713   if (*list != Interval::end()) {
4714     assert(*list == i, "check");
4715     *list = (*list)->next();
4716     return true;
4717   } else {
4718     return false;
4719   }
4720 }
4721 
4722 void IntervalWalker::remove_from_list(Interval* i) {
4723   bool deleted;
4724 
4725   if (i->state() == activeState) {
4726     deleted = remove_from_list(active_first_addr(anyKind), i);
4727   } else {
4728     assert(i->state() == inactiveState, "invalid state");
4729     deleted = remove_from_list(inactive_first_addr(anyKind), i);
4730   }
4731 
4732   assert(deleted, "interval has not been found in list");
4733 }
4734 
4735 
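// advance all intervals on the given list (active or inactive) to position 'from':
// ranges that end before 'from' are skipped; an interval whose current range then covers
// 'from' is sorted into the active list, an interval that is currently in a hole is sorted
// into the inactive list, and an interval with no range left becomes handled.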
4736 void IntervalWalker::walk_to(IntervalState state, int from) {
4737   assert (state == activeState || state == inactiveState, "wrong state");
4738   for_each_interval_kind(kind) {
4739     Interval** prev = state == activeState ? active_first_addr(kind) : inactive_first_addr(kind);
4740     Interval* next   = *prev;
4741     while (next->current_from() <= from) {
4742       Interval* cur = next;
4743       next = cur->next();
4744 
4745       bool range_has_changed = false;
4746       while (cur->current_to() <= from) {
4747         cur->next_range();
4748         range_has_changed = true;
4749       }
4750 
4751       // also handle move from inactive list to active list
4752       range_has_changed = range_has_changed || (state == inactiveState && cur->current_from() <= from);
4753 
4754       if (range_has_changed) {
4755         // remove cur from list
4756         *prev = next;
4757         if (cur->current_at_end()) {
4758           // move to handled state (not maintained as a list)
4759           cur->set_state(handledState);
4760           interval_moved(cur, kind, state, handledState);
4761         } else if (cur->current_from() <= from){
4762           // sort into active list
4763           append_sorted(active_first_addr(kind), cur);
4764           cur->set_state(activeState);
4765           if (*prev == cur) {
4766             assert(state == activeState, "check");
4767             prev = cur->next_addr();
4768           }
4769           interval_moved(cur, kind, state, activeState);
4770         } else {
4771           // sort into inactive list
4772           append_sorted(inactive_first_addr(kind), cur);
4773           cur->set_state(inactiveState);
4774           if (*prev == cur) {
4775             assert(state == inactiveState, "check");
4776             prev = cur->next_addr();
4777           }
4778           interval_moved(cur, kind, state, inactiveState);
4779         }
4780       } else {
4781         prev = cur->next_addr();
4782         continue;
4783       }
4784     }
4785   }
4786 }
4787 
4788 
4789 void IntervalWalker::next_interval() {
4790   IntervalKind kind;
4791   Interval* any   = _unhandled_first[anyKind];
4792   Interval* fixed = _unhandled_first[fixedKind];
4793 
4794   if (any != Interval::end()) {
4795     // intervals may start at same position -> prefer fixed interval
4796     kind = fixed != Interval::end() && fixed->from() <= any->from() ? fixedKind : anyKind;
4797 
4798     assert (kind == fixedKind && fixed->from() <= any->from() ||
4799             kind == anyKind   && any->from() <= fixed->from(), "wrong interval!!!");
4800     assert(any == Interval::end() || fixed == Interval::end() || any->from() != fixed->from() || kind == fixedKind, "if fixed and any-Interval start at same position, fixed must be processed first");
4801 
4802   } else if (fixed != Interval::end()) {
4803     kind = fixedKind;
4804   } else {
4805     _current = NULL; return;
4806   }
4807   _current_kind = kind;
4808   _current = _unhandled_first[kind];
4809   _unhandled_first[kind] = _current->next();
4810   _current->set_next(Interval::end());
4811   _current->rewind_range();
4812 }
4813 
4814 
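// walk forward to lir_op_id: every unhandled interval that starts at or before lir_op_id is
// activated in turn (the walk position jumps to that interval's start position), and the
// active and inactive lists are advanced to the new position before each activation.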
4815 void IntervalWalker::walk_to(int lir_op_id) {
4816   assert(_current_position <= lir_op_id, "can not walk backwards");
4817   while (current() != NULL) {
4818     bool is_active = current()->from() <= lir_op_id;
4819     int id = is_active ? current()->from() : lir_op_id;
4820 
4821     TRACE_LINEAR_SCAN(2, if (_current_position < id) { tty->cr(); tty->print_cr("walk_to(%d) **************************************************************", id); })
4822 
4823     // set _current_position prior to call of walk_to
4824     _current_position = id;
4825 
4826     // call walk_to even if _current_position == id
4827     walk_to(activeState, id);
4828     walk_to(inactiveState, id);
4829 
4830     if (is_active) {
4831       current()->set_state(activeState);
4832       if (activate_current()) {
4833         append_sorted(active_first_addr(current_kind()), current());
4834         interval_moved(current(), current_kind(), unhandledState, activeState);
4835       }
4836 
4837       next_interval();
4838     } else {
4839       return;
4840     }
4841   }
4842 }
4843 
4844 void IntervalWalker::interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to) {
4845 #ifndef PRODUCT
4846   if (TraceLinearScanLevel >= 4) {
4847     #define print_state(state) \
4848     switch(state) {\
4849       case unhandledState: tty->print("unhandled"); break;\
4850       case activeState: tty->print("active"); break;\
4851       case inactiveState: tty->print("inactive"); break;\
4852       case handledState: tty->print("handled"); break;\
4853       default: ShouldNotReachHere(); \
4854     }
4855 
4856     print_state(from); tty->print(" to "); print_state(to);
4857     tty->fill_to(23);
4858     interval->print();
4859 
4860     #undef print_state
4861   }
4862 #endif
4863 }
4864 
4865 
4866 
4867 // **** Implementation of LinearScanWalker **************************
4868 
4869 LinearScanWalker::LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
4870   : IntervalWalker(allocator, unhandled_fixed_first, unhandled_any_first)
4871   , _move_resolver(allocator)
4872 {
4873   for (int i = 0; i < LinearScan::nof_regs; i++) {
4874     _spill_intervals[i] = new IntervalList(2);
4875   }
4876 }
4877 
4878 
4879 inline void LinearScanWalker::init_use_lists(bool only_process_use_pos) {
4880   for (int i = _first_reg; i <= _last_reg; i++) {
4881     _use_pos[i] = max_jint;
4882 
4883     if (!only_process_use_pos) {
4884       _block_pos[i] = max_jint;
4885       _spill_intervals[i]->clear();
4886     }
4887   }
4888 }
4889 
4890 inline void LinearScanWalker::exclude_from_use(int reg) {
4891   assert(reg < LinearScan::nof_regs, "interval must have a register assigned (stack slots not allowed)");
4892   if (reg >= _first_reg && reg <= _last_reg) {
4893     _use_pos[reg] = 0;
4894   }
4895 }
4896 inline void LinearScanWalker::exclude_from_use(Interval* i) {
4897   assert(i->assigned_reg() != any_reg, "interval has no register assigned");
4898 
4899   exclude_from_use(i->assigned_reg());
4900   exclude_from_use(i->assigned_regHi());
4901 }
4902 
4903 inline void LinearScanWalker::set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos) {
4904   assert(use_pos != 0, "must use exclude_from_use to set use_pos to 0");
4905 
4906   if (reg >= _first_reg && reg <= _last_reg) {
4907     if (_use_pos[reg] > use_pos) {
4908       _use_pos[reg] = use_pos;
4909     }
4910     if (!only_process_use_pos) {
4911       _spill_intervals[reg]->append(i);
4912     }
4913   }
4914 }
4915 inline void LinearScanWalker::set_use_pos(Interval* i, int use_pos, bool only_process_use_pos) {
4916   assert(i->assigned_reg() != any_reg, "interval has no register assigned");
4917   if (use_pos != -1) {
4918     set_use_pos(i->assigned_reg(), i, use_pos, only_process_use_pos);
4919     set_use_pos(i->assigned_regHi(), i, use_pos, only_process_use_pos);
4920   }
4921 }
4922 
4923 inline void LinearScanWalker::set_block_pos(int reg, Interval* i, int block_pos) {
4924   if (reg >= _first_reg && reg <= _last_reg) {
4925     if (_block_pos[reg] > block_pos) {
4926       _block_pos[reg] = block_pos;
4927     }
4928     if (_use_pos[reg] > block_pos) {
4929       _use_pos[reg] = block_pos;
4930     }
4931   }
4932 }
4933 inline void LinearScanWalker::set_block_pos(Interval* i, int block_pos) {
4934   assert(i->assigned_reg() != any_reg, "interval has no register assigned");
4935   if (block_pos != -1) {
4936     set_block_pos(i->assigned_reg(), i, block_pos);
4937     set_block_pos(i->assigned_regHi(), i, block_pos);
4938   }
4939 }
4940 
4941 
4942 void LinearScanWalker::free_exclude_active_fixed() {
4943   Interval* list = active_first(fixedKind);
4944   while (list != Interval::end()) {
4945     assert(list->assigned_reg() < LinearScan::nof_regs, "active interval must have a register assigned");
4946     exclude_from_use(list);
4947     list = list->next();
4948   }
4949 }
4950 
4951 void LinearScanWalker::free_exclude_active_any() {
4952   Interval* list = active_first(anyKind);
4953   while (list != Interval::end()) {
4954     exclude_from_use(list);
4955     list = list->next();
4956   }
4957 }
4958 
4959 void LinearScanWalker::free_collect_inactive_fixed(Interval* cur) {
4960   Interval* list = inactive_first(fixedKind);
4961   while (list != Interval::end()) {
4962     if (cur->to() <= list->current_from()) {
4963       assert(list->current_intersects_at(cur) == -1, "must not intersect");
4964       set_use_pos(list, list->current_from(), true);
4965     } else {
4966       set_use_pos(list, list->current_intersects_at(cur), true);
4967     }
4968     list = list->next();
4969   }
4970 }
4971 
4972 void LinearScanWalker::free_collect_inactive_any(Interval* cur) {
4973   Interval* list = inactive_first(anyKind);
4974   while (list != Interval::end()) {
4975     set_use_pos(list, list->current_intersects_at(cur), true);
4976     list = list->next();
4977   }
4978 }
4979 
4980 void LinearScanWalker::spill_exclude_active_fixed() {
4981   Interval* list = active_first(fixedKind);
4982   while (list != Interval::end()) {
4983     exclude_from_use(list);
4984     list = list->next();
4985   }
4986 }
4987 
4988 void LinearScanWalker::spill_block_inactive_fixed(Interval* cur) {
4989   Interval* list = inactive_first(fixedKind);
4990   while (list != Interval::end()) {
4991     if (cur->to() > list->current_from()) {
4992       set_block_pos(list, list->current_intersects_at(cur));
4993     } else {
4994       assert(list->current_intersects_at(cur) == -1, "invalid optimization: intervals intersect");
4995     }
4996 
4997     list = list->next();
4998   }
4999 }
5000 
5001 void LinearScanWalker::spill_collect_active_any() {
5002   Interval* list = active_first(anyKind);
5003   while (list != Interval::end()) {
5004     set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
5005     list = list->next();
5006   }
5007 }
5008 
5009 void LinearScanWalker::spill_collect_inactive_any(Interval* cur) {
5010   Interval* list = inactive_first(anyKind);
5011   while (list != Interval::end()) {
5012     if (list->current_intersects(cur)) {
5013       set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
5014     }
5015     list = list->next();
5016   }
5017 }
5018 
5019 
5020 void LinearScanWalker::insert_move(int op_id, Interval* src_it, Interval* dst_it) {
5021   // output all moves here. When source and target are equal, the move is
5022   // optimized away later in assign_reg_nums
5023 
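  // round op_id up to the next even id, e.g. 37 -> 38 (instructions have even op_ids,
  // odd op_ids denote the positions between two instructions)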
5024   op_id = (op_id + 1) & ~1;
5025   BlockBegin* op_block = allocator()->block_of_op_with_id(op_id);
5026   assert(op_id > 0 && allocator()->block_of_op_with_id(op_id - 2) == op_block, "cannot insert move at block boundary");
5027 
5028   // calculate index of instruction inside instruction list of current block
5029   // the minimal index (for a block with no spill moves) can be calculated because the
5030   // numbering of instructions is known.
5031   // When the block already contains spill moves, the index must be increased until the
5032   // correct index is reached.
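  // Example: if the first instruction of the block has id 30 and op_id is 36, the minimal
  // index is (36 - 30) / 2 = 3; each spill move already inserted before that position shifts
  // the real index further to the right, which the while-loop below corrects for.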
5033   LIR_OpList* list = op_block->lir()->instructions_list();
5034   int index = (op_id - list->at(0)->id()) / 2;
5035   assert(list->at(index)->id() <= op_id, "error in calculation");
5036 
5037   while (list->at(index)->id() != op_id) {
5038     index++;
5039     assert(0 <= index && index < list->length(), "index out of bounds");
5040   }
5041   assert(1 <= index && index < list->length(), "index out of bounds");
5042   assert(list->at(index)->id() == op_id, "error in calculation");
5043 
5044   // insert new instruction before instruction at position index
5045   _move_resolver.move_insert_position(op_block->lir(), index - 1);
5046   _move_resolver.add_mapping(src_it, dst_it);
5047 }
5048 
5049 
5050 int LinearScanWalker::find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos) {
5051   int from_block_nr = min_block->linear_scan_number();
5052   int to_block_nr = max_block->linear_scan_number();
5053 
5054   assert(0 <= from_block_nr && from_block_nr < block_count(), "out of range");
5055   assert(0 <= to_block_nr && to_block_nr < block_count(), "out of range");
5056   assert(from_block_nr < to_block_nr, "must cross block boundary");
5057 
5058   // Try to split at end of max_block. If this would be after
5059   // max_split_pos, then use the beginning of max_block
5060   int optimal_split_pos = max_block->last_lir_instruction_id() + 2;
5061   if (optimal_split_pos > max_split_pos) {
5062     optimal_split_pos = max_block->first_lir_instruction_id();
5063   }
5064 
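  // prefer the end of a block with minimal loop depth: e.g. if the candidate blocks are
  // B4 (loop depth 2), B5 (depth 1) and B6 (depth 2), the split position is moved to the end
  // of B5 so that the resulting spill/reload moves stay out of the inner loop
  // (block numbers are only an illustration)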
5065   int min_loop_depth = max_block->loop_depth();
5066   for (int i = to_block_nr - 1; i >= from_block_nr; i--) {
5067     BlockBegin* cur = block_at(i);
5068 
5069     if (cur->loop_depth() < min_loop_depth) {
5070       // block with lower loop-depth found -> split at the end of this block
5071       min_loop_depth = cur->loop_depth();
5072       optimal_split_pos = cur->last_lir_instruction_id() + 2;
5073     }
5074   }
5075   assert(optimal_split_pos > allocator()->max_lir_op_id() || allocator()->is_block_begin(optimal_split_pos), "algorithm must move split pos to block boundary");
5076 
5077   return optimal_split_pos;
5078 }
5079 
5080 
5081 int LinearScanWalker::find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization) {
5082   int optimal_split_pos = -1;
5083   if (min_split_pos == max_split_pos) {
5084     // trivial case, no optimization of split position possible
5085     TRACE_LINEAR_SCAN(4, tty->print_cr("      min-pos and max-pos are equal, no optimization possible"));
5086     optimal_split_pos = min_split_pos;
5087 
5088   } else {
5089     assert(min_split_pos < max_split_pos, "must be true then");
5090     assert(min_split_pos > 0, "cannot access min_split_pos - 1 otherwise");
5091 
5092     // reason for using min_split_pos - 1: when the minimal split pos is exactly at the
5093     // beginning of a block, then min_split_pos is also a possible split position.
5094     // Use the block before as min_block, because then min_block->last_lir_instruction_id() + 2 == min_split_pos
5095     BlockBegin* min_block = allocator()->block_of_op_with_id(min_split_pos - 1);
5096 
5097     // reason for using max_split_pos - 1: otherwise there would be an assertion failure
5098     // when an interval ends at the end of the last block of the method
5099     // (in this case, max_split_pos == allocator()->max_lir_op_id() + 2, and there is no
5100     // block at this op_id)
5101     BlockBegin* max_block = allocator()->block_of_op_with_id(max_split_pos - 1);
5102 
5103     assert(min_block->linear_scan_number() <= max_block->linear_scan_number(), "invalid order");
5104     if (min_block == max_block) {
5105       // split position cannot be moved to block boundary, so split as late as possible
5106       TRACE_LINEAR_SCAN(4, tty->print_cr("      cannot move split pos to block boundary because min_pos and max_pos are in same block"));
5107       optimal_split_pos = max_split_pos;
5108 
5109     } else if (it->has_hole_between(max_split_pos - 1, max_split_pos) && !allocator()->is_block_begin(max_split_pos)) {
5110       // Do not move split position if the interval has a hole before max_split_pos.
5111       // Intervals resulting from Phi-Functions have more than one definition (marked
5112       // as mustHaveRegister) with a hole before each definition. When the register is needed
5113       // for the second definition, an earlier reloading is unnecessary.
5114       TRACE_LINEAR_SCAN(4, tty->print_cr("      interval has hole just before max_split_pos, so splitting at max_split_pos"));
5115       optimal_split_pos = max_split_pos;
5116 
5117     } else {
5118       // search for the optimal block boundary between min_split_pos and max_split_pos
5119       TRACE_LINEAR_SCAN(4, tty->print_cr("      moving split pos to optimal block boundary between block B%d and B%d", min_block->block_id(), max_block->block_id()));
5120 
5121       if (do_loop_optimization) {
5122         // Loop optimization: if a loop-end marker is found between min- and max-position,
5123         // then split before this loop
5124         int loop_end_pos = it->next_usage_exact(loopEndMarker, min_block->last_lir_instruction_id() + 2);
5125         TRACE_LINEAR_SCAN(4, tty->print_cr("      loop optimization: loop end found at pos %d", loop_end_pos));
5126 
5127         assert(loop_end_pos > min_split_pos, "invalid order");
5128         if (loop_end_pos < max_split_pos) {
5129           // loop-end marker found between min- and max-position
5130           // if it is not the end marker for the same loop as the min-position, then move
5131           // the max-position to this loop block.
5132           // Desired result: uses tagged as shouldHaveRegister inside a loop cause a reloading
5133           // of the interval (normally, only mustHaveRegister causes a reloading)
5134           BlockBegin* loop_block = allocator()->block_of_op_with_id(loop_end_pos);
5135 
5136           TRACE_LINEAR_SCAN(4, tty->print_cr("      interval is used in loop that ends in block B%d, so trying to move max_block back from B%d to B%d", loop_block->block_id(), max_block->block_id(), loop_block->block_id()));
5137           assert(loop_block != min_block, "loop_block and min_block must be different because block boundary is needed between");
5138 
5139           optimal_split_pos = find_optimal_split_pos(min_block, loop_block, loop_block->last_lir_instruction_id() + 2);
5140           if (optimal_split_pos == loop_block->last_lir_instruction_id() + 2) {
5141             optimal_split_pos = -1;
5142             TRACE_LINEAR_SCAN(4, tty->print_cr("      loop optimization not necessary"));
5143           } else {
5144             TRACE_LINEAR_SCAN(4, tty->print_cr("      loop optimization successful"));
5145           }
5146         }
5147       }
5148 
5149       if (optimal_split_pos == -1) {
5150         // not calculated by loop optimization
5151         optimal_split_pos = find_optimal_split_pos(min_block, max_block, max_split_pos);
5152       }
5153     }
5154   }
5155   TRACE_LINEAR_SCAN(4, tty->print_cr("      optimal split position: %d", optimal_split_pos));
5156 
5157   return optimal_split_pos;
5158 }
5159 
5160 
5161 /*
5162   split an interval at the optimal position between min_split_pos and
5163   max_split_pos into two parts:
5164   1) the left part already has a location assigned
5165   2) the right part is sorted into the unhandled-list
5166 */
5167 void LinearScanWalker::split_before_usage(Interval* it, int min_split_pos, int max_split_pos) {
5168   TRACE_LINEAR_SCAN(2, tty->print   ("----- splitting interval: "); it->print());
5169   TRACE_LINEAR_SCAN(2, tty->print_cr("      between %d and %d", min_split_pos, max_split_pos));
5170 
5171   assert(it->from() < min_split_pos,         "cannot split at start of interval");
5172   assert(current_position() < min_split_pos, "cannot split before current position");
5173   assert(min_split_pos <= max_split_pos,     "invalid order");
5174   assert(max_split_pos <= it->to(),          "cannot split after end of interval");
5175 
5176   int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, true);
5177 
5178   assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
5179   assert(optimal_split_pos <= it->to(),  "cannot split after end of interval");
5180   assert(optimal_split_pos > it->from(), "cannot split at start of interval");
5181 
5182   if (optimal_split_pos == it->to() && it->next_usage(mustHaveRegister, min_split_pos) == max_jint) {
5183     // the split position would be just before the end of the interval
5184     // -> no split at all necessary
5185     TRACE_LINEAR_SCAN(4, tty->print_cr("      no split necessary because optimal split position is at end of interval"));
5186     return;
5187   }
5188 
5189   // must calculate this before the actual split is performed and before split position is moved to odd op_id
5190   bool move_necessary = !allocator()->is_block_begin(optimal_split_pos) && !it->has_hole_between(optimal_split_pos - 1, optimal_split_pos);
5191 
5192   if (!allocator()->is_block_begin(optimal_split_pos)) {
5193     // move position before actual instruction (odd op_id)
5194     optimal_split_pos = (optimal_split_pos - 1) | 1;
5195   }
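  // Illustrative note (assuming the usual C1 numbering scheme): LIR op_ids are assigned in
  // steps of 2, so even positions denote instructions and odd positions denote the gap
  // between two instructions. E.g. an optimal_split_pos of 18 that is not a block boundary
  // becomes (18 - 1) | 1 == 17, i.e. the split (and a possible move) lands between the
  // instructions with op_id 16 and 18.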
5196 
5197   TRACE_LINEAR_SCAN(4, tty->print_cr("      splitting at position %d", optimal_split_pos));
5198   assert(allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
5199   assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");
5200 
5201   Interval* split_part = it->split(optimal_split_pos);
5202 
5203   allocator()->append_interval(split_part);
5204   allocator()->copy_register_flags(it, split_part);
5205   split_part->set_insert_move_when_activated(move_necessary);
5206   append_to_unhandled(unhandled_first_addr(anyKind), split_part);
5207 
5208   TRACE_LINEAR_SCAN(2, tty->print_cr("      split interval in two parts (insert_move_when_activated: %d)", move_necessary));
5209   TRACE_LINEAR_SCAN(2, tty->print   ("      "); it->print());
5210   TRACE_LINEAR_SCAN(2, tty->print   ("      "); split_part->print());
5211 }
5212 
5213 /*
5214   split an interval at the optimal position between min_split_pos and
5215   max_split_pos in two parts:
5216   1) the left part already has a location assigned
5217   2) the right part is always on the stack and therefore ignored in further processing
5218 */
5219 void LinearScanWalker::split_for_spilling(Interval* it) {
5220   // calculate allowed range of splitting position
5221   int max_split_pos = current_position();
5222   int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, max_split_pos) + 1, it->from());
5223 
5224   TRACE_LINEAR_SCAN(2, tty->print   ("----- splitting and spilling interval: "); it->print());
5225   TRACE_LINEAR_SCAN(2, tty->print_cr("      between %d and %d", min_split_pos, max_split_pos));
5226 
5227   assert(it->state() == activeState,     "why spill interval that is not active?");
5228   assert(it->from() <= min_split_pos,    "cannot split before start of interval");
5229   assert(min_split_pos <= max_split_pos, "invalid order");
5230   assert(max_split_pos < it->to(),       "cannot split at end of interval");
5231   assert(current_position() < it->to(),  "interval must not end before current position");
5232 
5233   if (min_split_pos == it->from()) {
5234     // the whole interval is never used, so spill it entirely to memory
5235     TRACE_LINEAR_SCAN(2, tty->print_cr("      spilling entire interval because split pos is at beginning of interval"));
5236     assert(it->first_usage(shouldHaveRegister) > current_position(), "interval must not have use position before current_position");
5237 
5238     allocator()->assign_spill_slot(it);
5239     allocator()->change_spill_state(it, min_split_pos);
5240 
5241     // Also kick parent intervals out of their register to memory when they have no use
5242     // position. This avoids a short interval in a register surrounded by intervals in
5243     // memory -> avoids useless moves from memory to register and back
5244     Interval* parent = it;
5245     while (parent != NULL && parent->is_split_child()) {
5246       parent = parent->split_child_before_op_id(parent->from());
5247 
5248       if (parent->assigned_reg() < LinearScan::nof_regs) {
5249         if (parent->first_usage(shouldHaveRegister) == max_jint) {
5250           // parent is never used, so kick it out of its assigned register
5251           TRACE_LINEAR_SCAN(4, tty->print_cr("      kicking out interval %d out of its register because it is never used", parent->reg_num()));
5252           allocator()->assign_spill_slot(parent);
5253         } else {
5254           // do not go further back because the register is actually used by the interval
5255           parent = NULL;
5256         }
5257       }
5258     }
5259 
5260   } else {
5261     // search optimal split pos, split interval and spill only the right hand part
5262     int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, false);
5263 
5264     assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
5265     assert(optimal_split_pos < it->to(), "cannot split at end of interval");
5266     assert(optimal_split_pos >= it->from(), "cannot split before start of interval");
5267 
5268     if (!allocator()->is_block_begin(optimal_split_pos)) {
5269       // move position before actual instruction (odd op_id)
5270       optimal_split_pos = (optimal_split_pos - 1) | 1;
5271     }
5272 
5273     TRACE_LINEAR_SCAN(4, tty->print_cr("      splitting at position %d", optimal_split_pos));
5274     assert(allocator()->is_block_begin(optimal_split_pos)  || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
5275     assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");
5276 
5277     Interval* spilled_part = it->split(optimal_split_pos);
5278     allocator()->append_interval(spilled_part);
5279     allocator()->assign_spill_slot(spilled_part);
5280     allocator()->change_spill_state(spilled_part, optimal_split_pos);
5281 
5282     if (!allocator()->is_block_begin(optimal_split_pos)) {
5283       TRACE_LINEAR_SCAN(4, tty->print_cr("      inserting move from interval %d to %d", it->reg_num(), spilled_part->reg_num()));
5284       insert_move(optimal_split_pos, it, spilled_part);
5285     }
5286 
5287     // the current_split_child is needed later when moves are inserted for reloading
5288     assert(spilled_part->current_split_child() == it, "overwriting wrong current_split_child");
5289     spilled_part->make_current_split_child();
5290 
5291     TRACE_LINEAR_SCAN(2, tty->print_cr("      split interval in two parts"));
5292     TRACE_LINEAR_SCAN(2, tty->print   ("      "); it->print());
5293     TRACE_LINEAR_SCAN(2, tty->print   ("      "); spilled_part->print());
5294   }
5295 }
5296 
5297 
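// Sketch of the intent, as far as it can be read from the callers in activate_current():
// the interval currently lives in a stack slot (e.g. an incoming method parameter or a
// must_start_in_memory value), so it is split somewhere between the current position and
// its first shouldHaveRegister use; the split child re-enters the unhandled list and can
// then be reloaded into a register when that use is reached.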
5298 void LinearScanWalker::split_stack_interval(Interval* it) {
5299   int min_split_pos = current_position() + 1;
5300   int max_split_pos = MIN2(it->first_usage(shouldHaveRegister), it->to());
5301 
5302   split_before_usage(it, min_split_pos, max_split_pos);
5303 }
5304 
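// Informal note: here the chosen register is only free up to register_available_until, so
// the interval is split no later than that position, but not before its last
// shouldHaveRegister use preceding that limit (min_split_pos), so the use can still be
// satisfied by the register before the split child is handed back to the unhandled list.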
5305 void LinearScanWalker::split_when_partial_register_available(Interval* it, int register_available_until) {
5306   int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, register_available_until), it->from() + 1);
5307   int max_split_pos = register_available_until;
5308 
5309   split_before_usage(it, min_split_pos, max_split_pos);
5310 }
5311 
5312 void LinearScanWalker::split_and_spill_interval(Interval* it) {
5313   assert(it->state() == activeState || it->state() == inactiveState, "other states not allowed");
5314 
5315   int current_pos = current_position();
5316   if (it->state() == inactiveState) {
5317     // the interval is currently inactive, so no spill slot is needed for now.
5318     // when the split part is activated, the interval has a new chance to get a register,
5319     // so in the best case no stack slot is necessary
5320     assert(it->has_hole_between(current_pos - 1, current_pos + 1), "interval can not be inactive otherwise");
5321     split_before_usage(it, current_pos + 1, current_pos + 1);
5322 
5323   } else {
5324     // search the position where the interval must have a register and split
5325     // at the optimal position before.
5326     // The newly created part is added to the unhandled list and will get a register
5327     // when it is activated
5328     int min_split_pos = current_pos + 1;
5329     int max_split_pos = MIN2(it->next_usage(mustHaveRegister, min_split_pos), it->to());
5330 
5331     split_before_usage(it, min_split_pos, max_split_pos);
5332 
5333     assert(it->next_usage(mustHaveRegister, current_pos) == max_jint, "the remaining part is spilled to stack and therefore has no register");
5334     split_for_spilling(it);
5335   }
5336 }
5337 
5338 
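// Heuristic used below (a reading of the code, not a specification): prefer a register
// whose next use position lies at or behind interval_to, i.e. one that is free for the
// whole interval; among those the smallest use_pos wins (tightest fit), unless the hint
// register qualifies. Otherwise fall back to a register that is free at least until
// reg_needed_until, pick the one that stays free longest (largest use_pos) to push the
// necessary split as far back as possible, and report via *need_split that the caller
// has to split the interval.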
5339 int LinearScanWalker::find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
5340   int min_full_reg = any_reg;
5341   int max_partial_reg = any_reg;
5342 
5343   for (int i = _first_reg; i <= _last_reg; i++) {
5344     if (i == ignore_reg) {
5345       // this register must be ignored
5346 
5347     } else if (_use_pos[i] >= interval_to) {
5348       // this register is free for the full interval
5349       if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
5350         min_full_reg = i;
5351       }
5352     } else if (_use_pos[i] > reg_needed_until) {
5353       // this register is at least free until reg_needed_until
5354       if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
5355         max_partial_reg = i;
5356       }
5357     }
5358   }
5359 
5360   if (min_full_reg != any_reg) {
5361     return min_full_reg;
5362   } else if (max_partial_reg != any_reg) {
5363     *need_split = true;
5364     return max_partial_reg;
5365   } else {
5366     return any_reg;
5367   }
5368 }
5369 
5370 int LinearScanWalker::find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split) {
5371   assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");
5372 
5373   int min_full_reg = any_reg;
5374   int max_partial_reg = any_reg;
5375 
5376   for (int i = _first_reg; i < _last_reg; i+=2) {
5377     if (_use_pos[i] >= interval_to && _use_pos[i + 1] >= interval_to) {
5378       // this register is free for the full interval
5379       if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
5380         min_full_reg = i;
5381       }
5382     } else if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
5383       // this register is at least free until reg_needed_until
5384       if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
5385         max_partial_reg = i;
5386       }
5387     }
5388   }
5389 
5390   if (min_full_reg != any_reg) {
5391     return min_full_reg;
5392   } else if (max_partial_reg != any_reg) {
5393     *need_split = true;
5394     return max_partial_reg;
5395   } else {
5396     return any_reg;
5397   }
5398 }
5399 
5400 
5401 bool LinearScanWalker::alloc_free_reg(Interval* cur) {
5402   TRACE_LINEAR_SCAN(2, tty->print("trying to find free register for "); cur->print());
5403 
5404   init_use_lists(true);
5405   free_exclude_active_fixed();
5406   free_exclude_active_any();
5407   free_collect_inactive_fixed(cur);
5408   free_collect_inactive_any(cur);
5409   assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
5410 
5411   // _use_pos contains the start of the next interval that has this register assigned
5412   // (either as a fixed register or a normal allocated register in the past)
5413   // only intervals overlapping with cur are processed, non-overlapping intervals can be ignored safely
5414   TRACE_LINEAR_SCAN(4, tty->print_cr("      state of registers:"));
5415   TRACE_LINEAR_SCAN(4, for (int i = _first_reg; i <= _last_reg; i++) tty->print_cr("      reg %d: use_pos: %d", i, _use_pos[i]));
5416 
5417   int hint_reg, hint_regHi;
5418   Interval* register_hint = cur->register_hint();
5419   if (register_hint != NULL) {
5420     hint_reg = register_hint->assigned_reg();
5421     hint_regHi = register_hint->assigned_regHi();
5422 
5423     if (allocator()->is_precolored_cpu_interval(register_hint)) {
5424       assert(hint_reg != any_reg && hint_regHi == any_reg, "must be for fixed intervals");
5425       hint_regHi = hint_reg + 1;  // connect e.g. eax-edx
5426     }
5427     TRACE_LINEAR_SCAN(4, tty->print("      hint registers %d, %d from interval ", hint_reg, hint_regHi); register_hint->print());
5428 
5429   } else {
5430     hint_reg = any_reg;
5431     hint_regHi = any_reg;
5432   }
5433   assert(hint_reg == any_reg || hint_reg != hint_regHi, "hint reg and regHi equal");
5434   assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned to interval");
5435 
5436   // the register must be free at least until this position
5437   int reg_needed_until = cur->from() + 1;
5438   int interval_to = cur->to();
5439 
5440   bool need_split = false;
5441   int split_pos;
5442   int reg;
5443   int regHi = any_reg;
5444 
5445   if (_adjacent_regs) {
5446     reg = find_free_double_reg(reg_needed_until, interval_to, hint_reg, &need_split);
5447     regHi = reg + 1;
5448     if (reg == any_reg) {
5449       return false;
5450     }
5451     split_pos = MIN2(_use_pos[reg], _use_pos[regHi]);
5452 
5453   } else {
5454     reg = find_free_reg(reg_needed_until, interval_to, hint_reg, any_reg, &need_split);
5455     if (reg == any_reg) {
5456       return false;
5457     }
5458     split_pos = _use_pos[reg];
5459 
5460     if (_num_phys_regs == 2) {
5461       regHi = find_free_reg(reg_needed_until, interval_to, hint_regHi, reg, &need_split);
5462 
5463       if (_use_pos[reg] < interval_to && regHi == any_reg) {
5464         // do not split interval if only one register can be assigned until the split pos
5465         // (when one register is found for the whole interval, split&spill is only
5466         // performed for the hi register)
5467         return false;
5468 
5469       } else if (regHi != any_reg) {
5470         split_pos = MIN2(split_pos, _use_pos[regHi]);
5471 
5472         // sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax
5473         if (reg > regHi) {
5474           int temp = reg;
5475           reg = regHi;
5476           regHi = temp;
5477         }
5478       }
5479     }
5480   }
5481 
5482   cur->assign_reg(reg, regHi);
5483   TRACE_LINEAR_SCAN(2, tty->print_cr("selected register %d, %d", reg, regHi));
5484 
5485   assert(split_pos > 0, "invalid split_pos");
5486   if (need_split) {
5487     // register not available for full interval, so split it
5488     split_when_partial_register_available(cur, split_pos);
5489   }
5490 
5491   // only return true if interval is completely assigned
5492   return _num_phys_regs == 1 || regHi != any_reg;
5493 }
5494 
5495 
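// Note on the "locked" variants below (as far as the surrounding code suggests): _use_pos
// now holds the next use of the intervals currently occupying each register, and _block_pos
// the position where the register is blocked by a fixed interval. The register whose
// occupants are used farthest in the future is chosen, so that evicting them hurts least;
// if the register becomes blocked before cur ends, *need_split is set so that cur is only
// given the register up to that point.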
5496 int LinearScanWalker::find_locked_reg(int reg_needed_until, int interval_to, int ignore_reg, bool* need_split) {
5497   int max_reg = any_reg;
5498 
5499   for (int i = _first_reg; i <= _last_reg; i++) {
5500     if (i == ignore_reg) {
5501       // this register must be ignored
5502 
5503     } else if (_use_pos[i] > reg_needed_until) {
5504       if (max_reg == any_reg || _use_pos[i] > _use_pos[max_reg]) {
5505         max_reg = i;
5506       }
5507     }
5508   }
5509 
5510   if (max_reg != any_reg && _block_pos[max_reg] <= interval_to) {
5511     *need_split = true;
5512   }
5513 
5514   return max_reg;
5515 }
5516 
5517 int LinearScanWalker::find_locked_double_reg(int reg_needed_until, int interval_to, bool* need_split) {
5518   assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");
5519 
5520   int max_reg = any_reg;
5521 
5522   for (int i = _first_reg; i < _last_reg; i+=2) {
5523     if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
5524       if (max_reg == any_reg || _use_pos[i] > _use_pos[max_reg]) {
5525         max_reg = i;
5526       }
5527     }
5528   }
5529 
5530   if (max_reg != any_reg &&
5531       (_block_pos[max_reg] <= interval_to || _block_pos[max_reg + 1] <= interval_to)) {
5532     *need_split = true;
5533   }
5534 
5535   return max_reg;
5536 }
5537 
5538 void LinearScanWalker::split_and_spill_intersecting_intervals(int reg, int regHi) {
5539   assert(reg != any_reg, "no register assigned");
5540 
5541   for (int i = 0; i < _spill_intervals[reg]->length(); i++) {
5542     Interval* it = _spill_intervals[reg]->at(i);
5543     remove_from_list(it);
5544     split_and_spill_interval(it);
5545   }
5546 
5547   if (regHi != any_reg) {
5548     IntervalList* processed = _spill_intervals[reg];
5549     for (int i = 0; i < _spill_intervals[regHi]->length(); i++) {
5550       Interval* it = _spill_intervals[regHi]->at(i);
5551       if (processed->find(it) == -1) {
5552         remove_from_list(it);
5553         split_and_spill_interval(it);
5554       }
5555     }
5556   }
5557 }
5558 
5559 
5560 // Split an Interval and spill it to memory so that cur can be placed in a register
5561 void LinearScanWalker::alloc_locked_reg(Interval* cur) {
5562   TRACE_LINEAR_SCAN(2, tty->print("need to split and spill to get register for "); cur->print());
5563 
5564   // collect current usage of registers
5565   init_use_lists(false);
5566   spill_exclude_active_fixed();
5567   assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
5568   spill_block_inactive_fixed(cur);
5569   spill_collect_active_any();
5570   spill_collect_inactive_any(cur);
5571 
5572 #ifndef PRODUCT
5573   if (TraceLinearScanLevel >= 4) {
5574     tty->print_cr("      state of registers:");
5575     for (int i = _first_reg; i <= _last_reg; i++) {
5576       tty->print("      reg %d: use_pos: %d, block_pos: %d, intervals: ", i, _use_pos[i], _block_pos[i]);
5577       for (int j = 0; j < _spill_intervals[i]->length(); j++) {
5578         tty->print("%d ", _spill_intervals[i]->at(j)->reg_num());
5579       }
5580       tty->cr();
5581     }
5582   }
5583 #endif
5584 
5585   // the register must be free at least until this position
5586   int reg_needed_until = MIN2(cur->first_usage(mustHaveRegister), cur->from() + 1);
5587   int interval_to = cur->to();
5588   assert (reg_needed_until > 0 && reg_needed_until < max_jint, "interval has no use");
5589 
5590   int split_pos = 0;
5591   int use_pos = 0;
5592   bool need_split = false;
5593   int reg, regHi;
5594 
5595   if (_adjacent_regs) {
5596     reg = find_locked_double_reg(reg_needed_until, interval_to, &need_split);
5597     regHi = reg + 1;
5598 
5599     if (reg != any_reg) {
5600       use_pos = MIN2(_use_pos[reg], _use_pos[regHi]);
5601       split_pos = MIN2(_block_pos[reg], _block_pos[regHi]);
5602     }
5603   } else {
5604     reg = find_locked_reg(reg_needed_until, interval_to, cur->assigned_reg(), &need_split);
5605     regHi = any_reg;
5606 
5607     if (reg != any_reg) {
5608       use_pos = _use_pos[reg];
5609       split_pos = _block_pos[reg];
5610 
5611       if (_num_phys_regs == 2) {
5612         if (cur->assigned_reg() != any_reg) {
5613           regHi = reg;
5614           reg = cur->assigned_reg();
5615         } else {
5616           regHi = find_locked_reg(reg_needed_until, interval_to, reg, &need_split);
5617           if (regHi != any_reg) {
5618             use_pos = MIN2(use_pos, _use_pos[regHi]);
5619             split_pos = MIN2(split_pos, _block_pos[regHi]);
5620           }
5621         }
5622 
5623         if (regHi != any_reg && reg > regHi) {
5624           // sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax
5625           int temp = reg;
5626           reg = regHi;
5627           regHi = temp;
5628         }
5629       }
5630     }
5631   }
5632 
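  // Rough decision rule (derived from the condition below): if no suitable register was
  // found, or the intervals currently holding the candidate register are all used again
  // no later than cur's own first mustHaveRegister use, it is cheaper to spill cur itself;
  // otherwise evict the intersecting intervals and give the register to cur.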
5633   if (reg == any_reg || (_num_phys_regs == 2 && regHi == any_reg) || use_pos <= cur->first_usage(mustHaveRegister)) {
5634     // the first use of cur is later than the spilling position -> spill cur
5635     TRACE_LINEAR_SCAN(4, tty->print_cr("able to spill current interval. first_usage(register): %d, use_pos: %d", cur->first_usage(mustHaveRegister), use_pos));
5636 
5637     if (cur->first_usage(mustHaveRegister) <= cur->from() + 1) {
5638       assert(false, "cannot spill interval that is used in first instruction (possible reason: no register found)");
5639       // assign a reasonable register and do a bailout in product mode to avoid errors
5640       allocator()->assign_spill_slot(cur);
5641       BAILOUT("LinearScan: no register found");
5642     }
5643 
5644     split_and_spill_interval(cur);
5645   } else {
5646     TRACE_LINEAR_SCAN(4, tty->print_cr("decided to use register %d, %d", reg, regHi));
5647     assert(reg != any_reg && (_num_phys_regs == 1 || regHi != any_reg), "no register found");
5648     assert(split_pos > 0, "invalid split_pos");
5649     assert(need_split == false || split_pos > cur->from(), "splitting interval at from");
5650 
5651     cur->assign_reg(reg, regHi);
5652     if (need_split) {
5653       // register not available for full interval, so split it
5654       split_when_partial_register_available(cur, split_pos);
5655     }
5656 
5657     // perform splitting and spilling for all affected intervals
5658     split_and_spill_intersecting_intervals(reg, regHi);
5659   }
5660 }
5661 
5662 bool LinearScanWalker::no_allocation_possible(Interval* cur) {
5663 #ifdef X86
5664   // fast calculation of intervals that can never get a register because the
5665   // next instruction is a call that blocks all registers
5666   // Note: this does not work if callee-saved registers are available (e.g. on Sparc)
5667 
5668   // check if this interval is the result of a split operation
5669   // (an interval got a register until this position)
5670   int pos = cur->from();
5671   if ((pos & 1) == 1) {
5672     // the current instruction is a call that blocks all registers
5673     if (pos < allocator()->max_lir_op_id() && allocator()->has_call(pos + 1)) {
5674       TRACE_LINEAR_SCAN(4, tty->print_cr("      free register cannot be available because all registers blocked by following call"));
5675 
5676       // safety check that there is really no register available
5677       assert(alloc_free_reg(cur) == false, "found a register for this interval");
5678       return true;
5679     }
5680 
5681   }
5682 #endif
5683   return false;
5684 }
5685 
5686 void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
5687   BasicType type = cur->type();
5688   _num_phys_regs = LinearScan::num_physical_regs(type);
5689   _adjacent_regs = LinearScan::requires_adjacent_regs(type);
5690 
5691   if (pd_init_regs_for_alloc(cur)) {
5692     // the appropriate register range was selected.
5693   } else if (type == T_FLOAT || type == T_DOUBLE) {
5694     _first_reg = pd_first_fpu_reg;
5695     _last_reg = pd_last_fpu_reg;
5696   } else {
5697     _first_reg = pd_first_cpu_reg;
5698     _last_reg = FrameMap::last_cpu_reg();
5699   }
5700 
5701   assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
5702   assert(0 <= _last_reg && _last_reg < LinearScan::nof_regs, "out of range");
5703 }
5704 
5705 
5706 bool LinearScanWalker::is_move(LIR_Op* op, Interval* from, Interval* to) {
5707   if (op->code() != lir_move) {
5708     return false;
5709   }
5710   assert(op->as_Op1() != NULL, "move must be LIR_Op1");
5711 
5712   LIR_Opr in = ((LIR_Op1*)op)->in_opr();
5713   LIR_Opr res = ((LIR_Op1*)op)->result_opr();
5714   return in->is_virtual() && res->is_virtual() && in->vreg_number() == from->reg_num() && res->vreg_number() == to->reg_num();
5715 }
5716 
5717 // optimization (especially for phi functions of nested loops):
5718 // assign same spill slot to non-intersecting intervals
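// Roughly the situation this targets (an illustrative reading of is_move() above): cur is
// connected to register_hint by two moves, register_hint -> cur at cur->from() and
// cur -> register_hint at cur->to(), e.g. a value carried around a loop via a phi function.
// If the hinted interval is spilled at that point anyway, letting cur share its canonical
// spill slot (and dropping the first use positions) avoids reserving a register and an
// extra stack slot for cur.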
5719 void LinearScanWalker::combine_spilled_intervals(Interval* cur) {
5720   if (cur->is_split_child()) {
5721     // optimization is only suitable for split parents
5722     return;
5723   }
5724 
5725   Interval* register_hint = cur->register_hint(false);
5726   if (register_hint == NULL) {
5727     // cur is not the target of a move, otherwise register_hint would be set
5728     return;
5729   }
5730   assert(register_hint->is_split_parent(), "register hint must be split parent");
5731 
5732   if (cur->spill_state() != noOptimization || register_hint->spill_state() != noOptimization) {
5733     // combining the stack slots for intervals where spill move optimization is applied
5734     // is not beneficial and would cause problems
5735     return;
5736   }
5737 
5738   int begin_pos = cur->from();
5739   int end_pos = cur->to();
5740   if (end_pos > allocator()->max_lir_op_id() || (begin_pos & 1) != 0 || (end_pos & 1) != 0) {
5741     // safety check that lir_op_with_id is allowed
5742     return;
5743   }
5744 
5745   if (!is_move(allocator()->lir_op_with_id(begin_pos), register_hint, cur) || !is_move(allocator()->lir_op_with_id(end_pos), cur, register_hint)) {
5746     // cur and register_hint are not connected with two moves
5747     return;
5748   }
5749 
5750   Interval* begin_hint = register_hint->split_child_at_op_id(begin_pos, LIR_OpVisitState::inputMode);
5751   Interval* end_hint = register_hint->split_child_at_op_id(end_pos, LIR_OpVisitState::outputMode);
5752   if (begin_hint == end_hint || begin_hint->to() != begin_pos || end_hint->from() != end_pos) {
5753     // register_hint must be split, otherwise the re-writing of use positions does not work
5754     return;
5755   }
5756 
5757   assert(begin_hint->assigned_reg() != any_reg, "must have register assigned");
5758   assert(end_hint->assigned_reg() == any_reg, "must not have register assigned");
5759   assert(cur->first_usage(mustHaveRegister) == begin_pos, "must have use position at begin of interval because of move");
5760   assert(end_hint->first_usage(mustHaveRegister) == end_pos, "must have use position at begin of interval because of move");
5761 
5762   if (begin_hint->assigned_reg() < LinearScan::nof_regs) {
5763     // register_hint is not spilled at begin_pos, so it would not be beneficial to immediately spill cur
5764     return;
5765   }
5766   assert(register_hint->canonical_spill_slot() != -1, "must be set when part of interval was spilled");
5767   assert(!cur->intersects(register_hint), "cur should not intersect register_hint");
5768 
5769   if (cur->intersects_any_children_of(register_hint)) {
5770     // Bail out if cur intersects any split children of register_hint, which have the same spill slot as their parent. An overlap of two intervals with
5771     // the same spill slot could result in a situation where both intervals are spilled at the same time to the same stack location which is not correct.
5772     return;
5773   }
5774 
5775   // modify intervals such that cur gets the same stack slot as register_hint
5776   // delete use positions to prevent the intervals from getting a register at the beginning
5777   cur->set_canonical_spill_slot(register_hint->canonical_spill_slot());
5778   cur->remove_first_use_pos();
5779   end_hint->remove_first_use_pos();
5780 }
5781 
5782 
5783 // allocate a physical register or memory location to an interval
5784 bool LinearScanWalker::activate_current() {
5785   Interval* cur = current();
5786   bool result = true;
5787 
5788   TRACE_LINEAR_SCAN(2, tty->print   ("+++++ activating interval "); cur->print());
5789   TRACE_LINEAR_SCAN(4, tty->print_cr("      split_parent: %d, insert_move_when_activated: %d", cur->split_parent()->reg_num(), cur->insert_move_when_activated()));
5790 
5791   if (cur->assigned_reg() >= LinearScan::nof_regs) {
5792     // activating an interval that has a stack slot assigned -> split it at first use position
5793     // used for method parameters
5794     TRACE_LINEAR_SCAN(4, tty->print_cr("      interval has spill slot assigned (method parameter) -> split it before first use"));
5795 
5796     split_stack_interval(cur);
5797     result = false;
5798 
5799   } else if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::must_start_in_memory)) {
5800     // activating an interval that must start in a stack slot, but may get a register later
5801     // used for lir_roundfp: rounding is done by store to stack and reload later
5802     TRACE_LINEAR_SCAN(4, tty->print_cr("      interval must start in stack slot -> split it before first use"));
5803     assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned");
5804 
5805     allocator()->assign_spill_slot(cur);
5806     split_stack_interval(cur);
5807     result = false;
5808 
5809   } else if (cur->assigned_reg() == any_reg) {
5810     // interval has no assigned register -> normal allocation
5811     // (this is the normal case for most intervals)
5812     TRACE_LINEAR_SCAN(4, tty->print_cr("      normal allocation of register"));
5813 
5814     // assign same spill slot to non-intersecting intervals
5815     combine_spilled_intervals(cur);
5816 
5817     init_vars_for_alloc(cur);
5818     if (no_allocation_possible(cur) || !alloc_free_reg(cur)) {
5819       // no empty register available.
5820       // split and spill another interval so that this interval gets a register
5821       alloc_locked_reg(cur);
5822     }
5823 
5824     // spilled intervals need not be moved to the active-list
5825     if (cur->assigned_reg() >= LinearScan::nof_regs) {
5826       result = false;
5827     }
5828   }
5829 
5830   // load spilled values that become active from stack slot to register
5831   if (cur->insert_move_when_activated()) {
5832     assert(cur->is_split_child(), "must be");
5833     assert(cur->current_split_child() != NULL, "must be");
5834     assert(cur->current_split_child()->reg_num() != cur->reg_num(), "cannot insert move between same interval");
5835     TRACE_LINEAR_SCAN(4, tty->print_cr("Inserting move from interval %d to %d because insert_move_when_activated is set", cur->current_split_child()->reg_num(), cur->reg_num()));
5836 
5837     insert_move(cur->from(), cur->current_split_child(), cur);
5838   }
5839   cur->make_current_split_child();
5840 
5841   return result; // true = interval is moved to active list
5842 }
5843 
5844 
5845 // Implementation of EdgeMoveOptimizer
5846 
5847 EdgeMoveOptimizer::EdgeMoveOptimizer() :
5848   _edge_instructions(4),
5849   _edge_instructions_idx(4)
5850 {
5851 }
5852 
5853 void EdgeMoveOptimizer::optimize(BlockList* code) {
5854   EdgeMoveOptimizer optimizer = EdgeMoveOptimizer();
5855 
5856   // ignore the first block in the list (index 0 is not processed)
5857   for (int i = code->length() - 1; i >= 1; i--) {
5858     BlockBegin* block = code->at(i);
5859 
5860     if (block->number_of_preds() > 1 && !block->is_set(BlockBegin::exception_entry_flag)) {
5861       optimizer.optimize_moves_at_block_end(block);
5862     }
5863     if (block->number_of_sux() == 2) {
5864       optimizer.optimize_moves_at_block_begin(block);
5865     }
5866   }
5867 }
5868 
5869 
5870 // clear all internal data structures
5871 void EdgeMoveOptimizer::init_instructions() {
5872   _edge_instructions.clear();
5873   _edge_instructions_idx.clear();
5874 }
5875 
5876 // append a lir-instruction-list and the index of the current operation into the list
5877 void EdgeMoveOptimizer::append_instructions(LIR_OpList* instructions, int instructions_idx) {
5878   _edge_instructions.append(instructions);
5879   _edge_instructions_idx.append(instructions_idx);
5880 }
5881 
5882 // return the current operation of the given edge (predecessor or successor)
5883 LIR_Op* EdgeMoveOptimizer::instruction_at(int edge) {
5884   LIR_OpList* instructions = _edge_instructions.at(edge);
5885   int idx = _edge_instructions_idx.at(edge);
5886 
5887   if (idx < instructions->length()) {
5888     return instructions->at(idx);
5889   } else {
5890     return NULL;
5891   }
5892 }
5893 
5894 // removes the current operation of the given edge (predecessor or successor)
5895 void EdgeMoveOptimizer::remove_cur_instruction(int edge, bool decrement_index) {
5896   LIR_OpList* instructions = _edge_instructions.at(edge);
5897   int idx = _edge_instructions_idx.at(edge);
5898   instructions->remove_at(idx);
5899 
5900   if (decrement_index) {
5901     _edge_instructions_idx.at_put(edge, idx - 1);
5902   }
5903 }
5904 
5905 
5906 bool EdgeMoveOptimizer::operations_different(LIR_Op* op1, LIR_Op* op2) {
5907   if (op1 == NULL || op2 == NULL) {
5908     // at least one block is already empty -> no optimization possible
5909     return true;
5910   }
5911 
5912   if (op1->code() == lir_move && op2->code() == lir_move) {
5913     assert(op1->as_Op1() != NULL, "move must be LIR_Op1");
5914     assert(op2->as_Op1() != NULL, "move must be LIR_Op1");
5915     LIR_Op1* move1 = (LIR_Op1*)op1;
5916     LIR_Op1* move2 = (LIR_Op1*)op2;
5917     if (move1->info() == move2->info() && move1->in_opr() == move2->in_opr() && move1->result_opr() == move2->result_opr()) {
5918       // these moves are exactly equal and can be optimized
5919       return false;
5920     }
5921 
5922   } else if (op1->code() == lir_fxch && op2->code() == lir_fxch) {
5923     assert(op1->as_Op1() != NULL, "fxch must be LIR_Op1");
5924     assert(op2->as_Op1() != NULL, "fxch must be LIR_Op1");
5925     LIR_Op1* fxch1 = (LIR_Op1*)op1;
5926     LIR_Op1* fxch2 = (LIR_Op1*)op2;
5927     if (fxch1->in_opr()->as_jint() == fxch2->in_opr()->as_jint()) {
5928       // equal FPU stack operations can be optimized
5929       return false;
5930     }
5931 
5932   } else if (op1->code() == lir_fpop_raw && op2->code() == lir_fpop_raw) {
5933     // equal FPU stack operations can be optimized
5934     return false;
5935   }
5936 
5937   // no optimization possible
5938   return true;
5939 }
5940 
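// Idea, paraphrasing the loop below: if every predecessor of the block ends with the same
// instruction immediately before its unconditional branch, that instruction can be moved
// once to the beginning of the block and deleted from all predecessors; this is repeated
// as long as the instructions keep matching.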
5941 void EdgeMoveOptimizer::optimize_moves_at_block_end(BlockBegin* block) {
5942   TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at end of block B%d", block->block_id()));
5943 
5944   if (block->is_predecessor(block)) {
5945     // currently we can't handle this correctly.
5946     return;
5947   }
5948 
5949   init_instructions();
5950   int num_preds = block->number_of_preds();
5951   assert(num_preds > 1, "do not call otherwise");
5952   assert(!block->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");
5953 
5954   // setup a list with the lir-instructions of all predecessors
5955   int i;
5956   for (i = 0; i < num_preds; i++) {
5957     BlockBegin* pred = block->pred_at(i);
5958     LIR_OpList* pred_instructions = pred->lir()->instructions_list();
5959 
5960     if (pred->number_of_sux() != 1) {
5961       // this can happen with switch-statements where multiple edges are between
5962       // the same blocks.
5963       return;
5964     }
5965 
5966     assert(pred->number_of_sux() == 1, "can handle only one successor");
5967     assert(pred->sux_at(0) == block, "invalid control flow");
5968     assert(pred_instructions->last()->code() == lir_branch, "block with successor must end with branch");
5969     assert(pred_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
5970     assert(pred_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");
5971 
5972     if (pred_instructions->last()->info() != NULL) {
5973       // can not optimize instructions when debug info is needed
5974       return;
5975     }
5976 
5977     // ignore the unconditional branch at the end of the block
5978     append_instructions(pred_instructions, pred_instructions->length() - 2);
5979   }
5980 
5981 
5982   // process lir-instructions while all predecessors end with the same instruction
5983   while (true) {
5984     LIR_Op* op = instruction_at(0);
5985     for (i = 1; i < num_preds; i++) {
5986       if (operations_different(op, instruction_at(i))) {
5987         // these instructions are different and cannot be optimized ->
5988         // no further optimization possible
5989         return;
5990       }
5991     }
5992 
5993     TRACE_LINEAR_SCAN(4, tty->print("found instruction that is equal in all %d predecessors: ", num_preds); op->print());
5994 
5995     // insert the instruction at the beginning of the current block
5996     block->lir()->insert_before(1, op);
5997 
5998     // delete the instruction at the end of all predecessors
5999     for (i = 0; i < num_preds; i++) {
6000       remove_cur_instruction(i, true);
6001     }
6002   }
6003 }
6004 
6005 
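// Symmetric case, again paraphrasing the code: if both successors of a block that ends
// with a conditional branch followed by an unconditional branch start with the same
// instruction, that instruction is hoisted into the block just before its two branches
// and removed from both successors.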
6006 void EdgeMoveOptimizer::optimize_moves_at_block_begin(BlockBegin* block) {
6007   TRACE_LINEAR_SCAN(4, tty->print_cr("optimization moves at begin of block B%d", block->block_id()));
6008 
6009   init_instructions();
6010   int num_sux = block->number_of_sux();
6011 
6012   LIR_OpList* cur_instructions = block->lir()->instructions_list();
6013 
6014   assert(num_sux == 2, "method should not be called otherwise");
6015   assert(cur_instructions->last()->code() == lir_branch, "block with successor must end with branch");
6016   assert(cur_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
6017   assert(cur_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");
6018 
6019   if (cur_instructions->last()->info() != NULL) {
6020     // cannot optimize instructions when debug info is needed
6021     return;
6022   }
6023 
6024   LIR_Op* branch = cur_instructions->at(cur_instructions->length() - 2);
6025   if (branch->info() != NULL || (branch->code() != lir_branch && branch->code() != lir_cond_float_branch)) {
6026     // not a valid case for optimization
6027     // currently, only blocks that end with two branches (conditional branch followed
6028     // by unconditional branch) are optimized
6029     return;
6030   }
6031 
6032   // now it is guaranteed that the block ends with two branch instructions.
6033   // the instructions are inserted at the end of the block before these two branches
6034   int insert_idx = cur_instructions->length() - 2;
6035 
6036   int i;
6037 #ifdef ASSERT
6038   for (i = insert_idx - 1; i >= 0; i--) {
6039     LIR_Op* op = cur_instructions->at(i);
6040     if ((op->code() == lir_branch || op->code() == lir_cond_float_branch) && ((LIR_OpBranch*)op)->block() != NULL) {
6041       assert(false, "block with two successors can have only two branch instructions");
6042     }
6043   }
6044 #endif
6045 
6046   // setup a list with the lir-instructions of all successors
6047   for (i = 0; i < num_sux; i++) {
6048     BlockBegin* sux = block->sux_at(i);
6049     LIR_OpList* sux_instructions = sux->lir()->instructions_list();
6050 
6051     assert(sux_instructions->at(0)->code() == lir_label, "block must start with label");
6052 
6053     if (sux->number_of_preds() != 1) {
6054       // this can happen with switch-statements where multiple edges are between
6055       // the same blocks.
6056       return;
6057     }
6058     assert(sux->pred_at(0) == block, "invalid control flow");
6059     assert(!sux->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");
6060 
6061     // ignore the label at the beginning of the block
6062     append_instructions(sux_instructions, 1);
6063   }
6064 
6065   // process lir-instructions while all successors begin with the same instruction
6066   while (true) {
6067     LIR_Op* op = instruction_at(0);
6068     for (i = 1; i < num_sux; i++) {
6069       if (operations_different(op, instruction_at(i))) {
6070         // these instructions are different and cannot be optimized ->
6071         // no further optimization possible
6072         return;
6073       }
6074     }
6075 
6076     TRACE_LINEAR_SCAN(4, tty->print("----- found instruction that is equal in all %d successors: ", num_sux); op->print());
6077 
6078     // insert instruction at end of current block
6079     block->lir()->insert_before(insert_idx, op);
6080     insert_idx++;
6081 
6082     // delete the instructions at the beginning of all successors
6083     for (i = 0; i < num_sux; i++) {
6084       remove_cur_instruction(i, false);
6085     }
6086   }
6087 }
6088 
6089 
6090 // Implementation of ControlFlowOptimizer
6091 
6092 ControlFlowOptimizer::ControlFlowOptimizer() :
6093   _original_preds(4)
6094 {
6095 }
6096 
6097 void ControlFlowOptimizer::optimize(BlockList* code) {
6098   ControlFlowOptimizer optimizer = ControlFlowOptimizer();
6099 
6100   // push the OSR entry block to the end so that we're not jumping over it.
6101   BlockBegin* osr_entry = code->at(0)->end()->as_Base()->osr_entry();
6102   if (osr_entry) {
6103     int index = osr_entry->linear_scan_number();
6104     assert(code->at(index) == osr_entry, "wrong index");
6105     code->remove_at(index);
6106     code->append(osr_entry);
6107   }
6108 
6109   optimizer.reorder_short_loops(code);
6110   optimizer.delete_empty_blocks(code);
6111   optimizer.delete_unnecessary_jumps(code);
6112   optimizer.delete_jumps_to_return(code);
6113 }
6114 
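// Presumed motivation for the rotation below (not stated in the original comments): with
// the loop header moved behind the loop body, the loop-closing block is followed directly
// by the header, so its unconditional branch back to the header can later be removed by
// delete_unnecessary_jumps and the loop needs fewer taken branches per iteration; the
// backward_branch_target flag is moved along so that code alignment still happens at the
// new top of the loop.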
6115 void ControlFlowOptimizer::reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx) {
6116   int i = header_idx + 1;
6117   int max_end = MIN2(header_idx + ShortLoopSize, code->length());
6118   while (i < max_end && code->at(i)->loop_depth() >= header_block->loop_depth()) {
6119     i++;
6120   }
6121 
6122   if (i == code->length() || code->at(i)->loop_depth() < header_block->loop_depth()) {
6123     int end_idx = i - 1;
6124     BlockBegin* end_block = code->at(end_idx);
6125 
6126     if (end_block->number_of_sux() == 1 && end_block->sux_at(0) == header_block) {
6127       // short loop from header_idx to end_idx found -> reorder blocks such that
6128       // the header_block is the last block instead of the first block of the loop
6129       TRACE_LINEAR_SCAN(1, tty->print_cr("Reordering short loop: length %d, header B%d, end B%d",
6130                                          end_idx - header_idx + 1,
6131                                          header_block->block_id(), end_block->block_id()));
6132 
6133       for (int j = header_idx; j < end_idx; j++) {
6134         code->at_put(j, code->at(j + 1));
6135       }
6136       code->at_put(end_idx, header_block);
6137 
6138       // correct the flags so that any loop alignment occurs in the right place.
6139       assert(code->at(end_idx)->is_set(BlockBegin::backward_branch_target_flag), "must be backward branch target");
6140       code->at(end_idx)->clear(BlockBegin::backward_branch_target_flag);
6141       code->at(header_idx)->set(BlockBegin::backward_branch_target_flag);
6142     }
6143   }
6144 }
6145 
6146 void ControlFlowOptimizer::reorder_short_loops(BlockList* code) {
6147   for (int i = code->length() - 1; i >= 0; i--) {
6148     BlockBegin* block = code->at(i);
6149 
6150     if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
6151       reorder_short_loop(code, block, i);
6152     }
6153   }
6154 
6155   DEBUG_ONLY(verify(code));
6156 }
6157 
6158 // only blocks with exactly one successor can be deleted. Such blocks
6159 // must always end with an unconditional branch to this successor
6160 bool ControlFlowOptimizer::can_delete_block(BlockBegin* block) {
6161   if (block->number_of_sux() != 1 || block->number_of_exception_handlers() != 0 || block->is_entry_block()) {
6162     return false;
6163   }
6164 
6165   LIR_OpList* instructions = block->lir()->instructions_list();
6166 
6167   assert(instructions->length() >= 2, "block must have label and branch");
6168   assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
6169   assert(instructions->last()->as_OpBranch() != NULL, "last instrcution must always be a branch");
6170   assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "branch must be unconditional");
6171   assert(instructions->last()->as_OpBranch()->block() == block->sux_at(0), "branch target must be the successor");
6172 
6173   // block must have exactly one successor
6174 
6175   if (instructions->length() == 2 && instructions->last()->info() == NULL) {
6176     return true;
6177   }
6178   return false;
6179 }
6180 
6181 // substitute branch targets in all branch-instructions of this block
6182 void ControlFlowOptimizer::substitute_branch_target(BlockBegin* block, BlockBegin* target_from, BlockBegin* target_to) {
6183   TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting empty block: substituting from B%d to B%d inside B%d", target_from->block_id(), target_to->block_id(), block->block_id()));
6184 
6185   LIR_OpList* instructions = block->lir()->instructions_list();
6186 
6187   assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
6188   for (int i = instructions->length() - 1; i >= 1; i--) {
6189     LIR_Op* op = instructions->at(i);
6190 
6191     if (op->code() == lir_branch || op->code() == lir_cond_float_branch) {
6192       assert(op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6193       LIR_OpBranch* branch = (LIR_OpBranch*)op;
6194 
6195       if (branch->block() == target_from) {
6196         branch->change_block(target_to);
6197       }
6198       if (branch->ublock() == target_from) {
6199         branch->change_ublock(target_to);
6200       }
6201     }
6202   }
6203 }
6204 
6205 void ControlFlowOptimizer::delete_empty_blocks(BlockList* code) {
6206   int old_pos = 0;
6207   int new_pos = 0;
6208   int num_blocks = code->length();
6209 
6210   while (old_pos < num_blocks) {
6211     BlockBegin* block = code->at(old_pos);
6212 
6213     if (can_delete_block(block)) {
6214       BlockBegin* new_target = block->sux_at(0);
6215 
6216       // propagate backward branch target flag for correct code alignment
6217       if (block->is_set(BlockBegin::backward_branch_target_flag)) {
6218         new_target->set(BlockBegin::backward_branch_target_flag);
6219       }
6220 
6221       // collect a list with all predecessors that contains each predecessor only once
6222       // the predecessors of block are changed during the substitution, so a copy of the
6223       // predecessor list is necessary
6224       int j;
6225       _original_preds.clear();
6226       for (j = block->number_of_preds() - 1; j >= 0; j--) {
6227         BlockBegin* pred = block->pred_at(j);
6228         if (_original_preds.find(pred) == -1) {
6229           _original_preds.append(pred);
6230         }
6231       }
6232 
6233       for (j = _original_preds.length() - 1; j >= 0; j--) {
6234         BlockBegin* pred = _original_preds.at(j);
6235         substitute_branch_target(pred, block, new_target);
6236         pred->substitute_sux(block, new_target);
6237       }
6238     } else {
6239       // adjust position of this block in the block list if blocks before
6240       // have been deleted
6241       if (new_pos != old_pos) {
6242         code->at_put(new_pos, code->at(old_pos));
6243       }
6244       new_pos++;
6245     }
6246     old_pos++;
6247   }
6248   code->trunc_to(new_pos);
6249 
6250   DEBUG_ONLY(verify(code));
6251 }
6252 
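// Two transformations are applied here (an informal summary of the code below):
//   1) an unconditional branch to the block that follows immediately in code order is
//      simply deleted, and
//   2) if a conditional branch to the immediately following block is followed by such an
//      unconditional branch, the condition (and the corresponding cmp, plus a profiling
//      cmove if present) is negated so that the conditional branch targets the former
//      unconditional target and the unconditional branch can be dropped.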
6253 void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
6254   // skip the last block because a branch is always necessary there
6255   for (int i = code->length() - 2; i >= 0; i--) {
6256     BlockBegin* block = code->at(i);
6257     LIR_OpList* instructions = block->lir()->instructions_list();
6258 
6259     LIR_Op* last_op = instructions->last();
6260     if (last_op->code() == lir_branch) {
6261       assert(last_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6262       LIR_OpBranch* last_branch = (LIR_OpBranch*)last_op;
6263 
6264       assert(last_branch->block() != NULL, "last branch must always have a block as target");
6265       assert(last_branch->label() == last_branch->block()->label(), "must be equal");
6266 
6267       if (last_branch->info() == NULL) {
6268         if (last_branch->block() == code->at(i + 1)) {
6269 
6270           TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));
6271 
6272           // delete last branch instruction
6273           instructions->trunc_to(instructions->length() - 1);
6274 
6275         } else {
6276           LIR_Op* prev_op = instructions->at(instructions->length() - 2);
6277           if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
6278             assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6279             LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
6280 
6281             if (prev_branch->stub() == NULL) {
6282 
6283               LIR_Op2* prev_cmp = NULL;
6284               // There might be a cmove inserted for profiling which depends on the same
6285               // compare. If we change the condition of the respective compare, we have
6286               // to take care of this cmove as well.
6287               LIR_Op2* prev_cmove = NULL;
6288 
6289               for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
6290                 prev_op = instructions->at(j);
6291                 // check for the cmove
6292                 if (prev_op->code() == lir_cmove) {
6293                   assert(prev_op->as_Op2() != NULL, "cmove must be of type LIR_Op2");
6294                   prev_cmove = (LIR_Op2*)prev_op;
6295                   assert(prev_branch->cond() == prev_cmove->condition(), "should be the same");
6296                 }
6297                 if (prev_op->code() == lir_cmp) {
6298                   assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
6299                   prev_cmp = (LIR_Op2*)prev_op;
6300                   assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
6301                 }
6302               }
6303               // Guarantee because it is dereferenced below.
6304               guarantee(prev_cmp != NULL, "should have found comp instruction for branch");
6305               if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
6306 
6307                 TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
6308 
6309                 // eliminate a conditional branch to the immediate successor
6310                 prev_branch->change_block(last_branch->block());
6311                 prev_branch->negate_cond();
6312                 prev_cmp->set_condition(prev_branch->cond());
6313                 instructions->trunc_to(instructions->length() - 1);
6314                 // if we do change the condition, we have to change the cmove as well
6315                 if (prev_cmove != NULL) {
6316                   prev_cmove->set_condition(prev_branch->cond());
6317                   LIR_Opr t = prev_cmove->in_opr1();
6318                   prev_cmove->set_in_opr1(prev_cmove->in_opr2());
6319                   prev_cmove->set_in_opr2(t);
6320                 }
6321               }
6322             }
6323           }
6324         }
6325       }
6326     }
6327   }
6328 
6329   DEBUG_ONLY(verify(code));
6330 }
6331 
6332 void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
6333 #ifdef ASSERT
6334   ResourceBitMap return_converted(BlockBegin::number_of_blocks());
6335 #endif
6336 
6337   for (int i = code->length() - 1; i >= 0; i--) {
6338     BlockBegin* block = code->at(i);
6339     LIR_OpList* cur_instructions = block->lir()->instructions_list();
6340     LIR_Op*     cur_last_op = cur_instructions->last();
6341 
6342     assert(cur_instructions->at(0)->code() == lir_label, "first instruction must always be a label");
6343     if (cur_instructions->length() == 2 && cur_last_op->code() == lir_return) {
6344       // the block contains only a label and a return
6345       // if a predecessor ends with an unconditional jump to this block, then the jump
6346       // can be replaced with a return instruction
6347       //
6348       // Note: the original block with only a return statement cannot be deleted completely
6349       //       because the predecessors might have other (conditional) jumps to this block
6350       //       -> this may lead to unnecessary return instructions in the final code
6351 
6352       assert(cur_last_op->info() == NULL, "return instructions do not have debug information");
6353       assert(block->number_of_sux() == 0 ||
6354              (return_converted.at(block->block_id()) && block->number_of_sux() == 1),
6355              "blocks that end with return must not have successors");
6356 
6357       assert(cur_last_op->as_Op1() != NULL, "return must be LIR_Op1");
6358       LIR_Opr return_opr = ((LIR_Op1*)cur_last_op)->in_opr();
6359 
6360       for (int j = block->number_of_preds() - 1; j >= 0; j--) {
6361         BlockBegin* pred = block->pred_at(j);
6362         LIR_OpList* pred_instructions = pred->lir()->instructions_list();
6363         LIR_Op*     pred_last_op = pred_instructions->last();
6364 
6365         if (pred_last_op->code() == lir_branch) {
6366           assert(pred_last_op->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
6367           LIR_OpBranch* pred_last_branch = (LIR_OpBranch*)pred_last_op;
6368 
6369           if (pred_last_branch->block() == block && pred_last_branch->cond() == lir_cond_always && pred_last_branch->info() == NULL) {
6370             // replace the jump to a return with a direct return
6371             // Note: currently the edge between the blocks is not deleted
6372             pred_instructions->at_put(pred_instructions->length() - 1, new LIR_Op1(lir_return, return_opr));
6373 #ifdef ASSERT
6374             return_converted.set_bit(pred->block_id());
6375 #endif
6376           }
6377         }
6378       }
6379     }
6380   }
6381 }
6382 
6383 
6384 #ifdef ASSERT
6385 void ControlFlowOptimizer::verify(BlockList* code) {
6386   for (int i = 0; i < code->length(); i++) {
6387     BlockBegin* block = code->at(i);
6388     LIR_OpList* instructions = block->lir()->instructions_list();
6389 
6390     int j;
6391     for (j = 0; j < instructions->length(); j++) {
6392       LIR_OpBranch* op_branch = instructions->at(j)->as_OpBranch();
6393 
6394       if (op_branch != NULL) {
6395         assert(op_branch->block() == NULL || code->find(op_branch->block()) != -1, "branch target not valid");
6396         assert(op_branch->ublock() == NULL || code->find(op_branch->ublock()) != -1, "branch target not valid");
6397       }
6398     }
6399 
6400     for (j = 0; j < block->number_of_sux() - 1; j++) {
6401       BlockBegin* sux = block->sux_at(j);
6402       assert(code->find(sux) != -1, "successor not valid");
6403     }
6404 
6405     for (j = 0; j < block->number_of_preds() - 1; j++) {
6406       BlockBegin* pred = block->pred_at(j);
6407       assert(code->find(pred) != -1, "successor not valid");
6408     }
6409   }
6410 }
6411 #endif
6412 
6413 
6414 #ifndef PRODUCT
6415 
6416 // Implementation of LinearScanStatistic
6417 
6418 const char* LinearScanStatistic::counter_name(int counter_idx) {
6419   switch (counter_idx) {
6420     case counter_method:          return "compiled methods";
6421     case counter_fpu_method:      return "methods using fpu";
6422     case counter_loop_method:     return "methods with loops";
6423     case counter_exception_method:return "methods with xhandlers";
6424 
6425     case counter_loop:            return "loops";
6426     case counter_block:           return "blocks";
6427     case counter_loop_block:      return "blocks inside loop";
6428     case counter_exception_block: return "exception handler entries";
6429     case counter_interval:        return "intervals";
6430     case counter_fixed_interval:  return "fixed intervals";
6431     case counter_range:           return "ranges";
6432     case counter_fixed_range:     return "fixed ranges";
6433     case counter_use_pos:         return "use positions";
6434     case counter_fixed_use_pos:   return "fixed use positions";
6435     case counter_spill_slots:     return "spill slots";
6436 
6437     // counter for classes of lir instructions
6438     case counter_instruction:     return "total instructions";
6439     case counter_label:           return "labels";
6440     case counter_entry:           return "method entries";
6441     case counter_return:          return "method returns";
6442     case counter_call:            return "method calls";
6443     case counter_move:            return "moves";
6444     case counter_cmp:             return "compare";
6445     case counter_cond_branch:     return "conditional branches";
6446     case counter_uncond_branch:   return "unconditional branches";
6447     case counter_stub_branch:     return "branches to stub";
6448     case counter_alu:             return "arithmetic + logic";
6449     case counter_alloc:           return "allocations";
6450     case counter_sync:            return "synchronisation";
6451     case counter_throw:           return "throw";
6452     case counter_unwind:          return "unwind";
6453     case counter_typecheck:       return "type+null-checks";
6454     case counter_fpu_stack:       return "fpu-stack";
6455     case counter_misc_inst:       return "other instructions";
6456     case counter_other_inst:      return "misc. instructions";
6457 
6458     // counter for different types of moves
6459     case counter_move_total:      return "total moves";
6460     case counter_move_reg_reg:    return "register->register";
6461     case counter_move_reg_stack:  return "register->stack";
6462     case counter_move_stack_reg:  return "stack->register";
6463     case counter_move_stack_stack:return "stack->stack";
6464     case counter_move_reg_mem:    return "register->memory";
6465     case counter_move_mem_reg:    return "memory->register";
6466     case counter_move_const_any:  return "constant->any";
6467 
6468     case blank_line_1:            return "";
6469     case blank_line_2:            return "";
6470 
6471     default: ShouldNotReachHere(); return "";
6472   }
6473 }
6474 
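// returns the counter that serves as the 100% reference when percentages are
// printed by print() below; invalid_counter means that no percentage is printed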
6475 LinearScanStatistic::Counter LinearScanStatistic::base_counter(int counter_idx) {
6476   if (counter_idx == counter_fpu_method || counter_idx == counter_loop_method || counter_idx == counter_exception_method) {
6477     return counter_method;
6478   } else if (counter_idx == counter_loop_block || counter_idx == counter_exception_block) {
6479     return counter_block;
6480   } else if (counter_idx >= counter_instruction && counter_idx <= counter_other_inst) {
6481     return counter_instruction;
6482   } else if (counter_idx >= counter_move_total && counter_idx <= counter_move_const_any) {
6483     return counter_move_total;
6484   }
6485   return invalid_counter;
6486 }
6487 
6488 LinearScanStatistic::LinearScanStatistic() {
6489   for (int i = 0; i < number_of_counters; i++) {
6490     _counters_sum[i] = 0;
6491     _counters_max[i] = -1;
6492   }
6493 
6494 }
6495 
6496 // add the method-local numbers to the total sum and remember the per-method maximum
6497 void LinearScanStatistic::sum_up(LinearScanStatistic &method_statistic) {
6498   for (int i = 0; i < number_of_counters; i++) {
6499     _counters_sum[i] += method_statistic._counters_sum[i];
6500     _counters_max[i] = MAX2(_counters_max[i], method_statistic._counters_sum[i]);
6501   }
6502 }
6503 
6504 void LinearScanStatistic::print(const char* title) {
6505   if (CountLinearScan || TraceLinearScanLevel > 0) {
6506     tty->cr();
6507     tty->print_cr("***** LinearScan statistic - %s *****", title);
6508 
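    // one line per counter: name, sum over all methods, percentage of the
    // base counter (if available) and the maximum value seen in a single method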
6509     for (int i = 0; i < number_of_counters; i++) {
6510       if (_counters_sum[i] > 0 || _counters_max[i] >= 0) {
6511         tty->print("%25s: %8d", counter_name(i), _counters_sum[i]);
6512 
6513         LinearScanStatistic::Counter cntr = base_counter(i);
6514         if (cntr != invalid_counter) {
6515           tty->print("  (%5.1f%%) ", _counters_sum[i] * 100.0 / _counters_sum[cntr]);
6516         } else {
6517           tty->print("           ");
6518         }
6519 
6520         if (_counters_max[i] >= 0) {
6521           tty->print("%8d", _counters_max[i]);
6522         }
6523       }
6524       tty->cr();
6525     }
6526   }
6527 }
6528 
6529 void LinearScanStatistic::collect(LinearScan* allocator) {
6530   inc_counter(counter_method);
6531   if (allocator->has_fpu_registers()) {
6532     inc_counter(counter_fpu_method);
6533   }
6534   if (allocator->num_loops() > 0) {
6535     inc_counter(counter_loop_method);
6536   }
6537   inc_counter(counter_loop, allocator->num_loops());
6538   inc_counter(counter_spill_slots, allocator->max_spills());
6539 
6540   int i;
6541   for (i = 0; i < allocator->interval_count(); i++) {
6542     Interval* cur = allocator->interval_at(i);
6543 
6544     if (cur != NULL) {
6545       inc_counter(counter_interval);
6546       inc_counter(counter_use_pos, cur->num_use_positions());
6547       if (LinearScan::is_precolored_interval(cur)) {
6548         inc_counter(counter_fixed_interval);
6549         inc_counter(counter_fixed_use_pos, cur->num_use_positions());
6550       }
6551 
6552       Range* range = cur->first();
6553       while (range != Range::end()) {
6554         inc_counter(counter_range);
6555         if (LinearScan::is_precolored_interval(cur)) {
6556           inc_counter(counter_fixed_range);
6557         }
6558         range = range->next();
6559       }
6560     }
6561   }
6562 
6563   bool has_xhandlers = false;
6564   // Note: only count blocks that are in code-emit order
6565   for (i = 0; i < allocator->ir()->code()->length(); i++) {
6566     BlockBegin* cur = allocator->ir()->code()->at(i);
6567 
6568     inc_counter(counter_block);
6569     if (cur->loop_depth() > 0) {
6570       inc_counter(counter_loop_block);
6571     }
6572     if (cur->is_set(BlockBegin::exception_entry_flag)) {
6573       inc_counter(counter_exception_block);
6574       has_xhandlers = true;
6575     }
6576 
6577     LIR_OpList* instructions = cur->lir()->instructions_list();
6578     for (int j = 0; j < instructions->length(); j++) {
6579       LIR_Op* op = instructions->at(j);
6580 
6581       inc_counter(counter_instruction);
6582 
6583       switch (op->code()) {
6584         case lir_label:           inc_counter(counter_label); break;
6585         case lir_std_entry:
6586         case lir_osr_entry:       inc_counter(counter_entry); break;
6587         case lir_return:          inc_counter(counter_return); break;
6588 
6589         case lir_rtcall:
6590         case lir_static_call:
6591         case lir_optvirtual_call:
6592         case lir_virtual_call:    inc_counter(counter_call); break;
6593 
6594         case lir_move: {
6595           inc_counter(counter_move);
6596           inc_counter(counter_move_total);
6597 
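          // classify the move by the kind of its source and destination operands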
6598           LIR_Opr in = op->as_Op1()->in_opr();
6599           LIR_Opr res = op->as_Op1()->result_opr();
6600           if (in->is_register()) {
6601             if (res->is_register()) {
6602               inc_counter(counter_move_reg_reg);
6603             } else if (res->is_stack()) {
6604               inc_counter(counter_move_reg_stack);
6605             } else if (res->is_address()) {
6606               inc_counter(counter_move_reg_mem);
6607             } else {
6608               ShouldNotReachHere();
6609             }
6610           } else if (in->is_stack()) {
6611             if (res->is_register()) {
6612               inc_counter(counter_move_stack_reg);
6613             } else {
6614               inc_counter(counter_move_stack_stack);
6615             }
6616           } else if (in->is_address()) {
6617             assert(res->is_register(), "must be");
6618             inc_counter(counter_move_mem_reg);
6619           } else if (in->is_constant()) {
6620             inc_counter(counter_move_const_any);
6621           } else {
6622             ShouldNotReachHere();
6623           }
6624           break;
6625         }
6626 
6627         case lir_cmp:             inc_counter(counter_cmp); break;
6628 
6629         case lir_branch:
6630         case lir_cond_float_branch: {
6631           LIR_OpBranch* branch = op->as_OpBranch();
6632           if (branch->block() == NULL) {
6633             inc_counter(counter_stub_branch);
6634           } else if (branch->cond() == lir_cond_always) {
6635             inc_counter(counter_uncond_branch);
6636           } else {
6637             inc_counter(counter_cond_branch);
6638           }
6639           break;
6640         }
6641 
6642         case lir_neg:
6643         case lir_add:
6644         case lir_sub:
6645         case lir_mul:
6646         case lir_mul_strictfp:
6647         case lir_div:
6648         case lir_div_strictfp:
6649         case lir_rem:
6650         case lir_sqrt:
6651         case lir_abs:
6652         case lir_log10:
6653         case lir_logic_and:
6654         case lir_logic_or:
6655         case lir_logic_xor:
6656         case lir_shl:
6657         case lir_shr:
6658         case lir_ushr:            inc_counter(counter_alu); break;
6659 
6660         case lir_alloc_object:
6661         case lir_alloc_array:     inc_counter(counter_alloc); break;
6662 
6663         case lir_monaddr:
6664         case lir_lock:
6665         case lir_unlock:          inc_counter(counter_sync); break;
6666 
6667         case lir_throw:           inc_counter(counter_throw); break;
6668 
6669         case lir_unwind:          inc_counter(counter_unwind); break;
6670 
6671         case lir_null_check:
6672         case lir_leal:
6673         case lir_instanceof:
6674         case lir_checkcast:
6675         case lir_store_check:     inc_counter(counter_typecheck); break;
6676 
6677         case lir_fpop_raw:
6678         case lir_fxch:
6679         case lir_fld:             inc_counter(counter_fpu_stack); break;
6680 
6681         case lir_nop:
6682         case lir_push:
6683         case lir_pop:
6684         case lir_convert:
6685         case lir_roundfp:
6686         case lir_cmove:           inc_counter(counter_misc_inst); break;
6687 
6688         default:                  inc_counter(counter_other_inst); break;
6689       }
6690     }
6691   }
6692 
6693   if (has_xhandlers) {
6694     inc_counter(counter_exception_method);
6695   }
6696 }
6697 
6698 void LinearScanStatistic::compute(LinearScan* allocator, LinearScanStatistic &global_statistic) {
6699   if (CountLinearScan || TraceLinearScanLevel > 0) {
6700 
6701     LinearScanStatistic local_statistic = LinearScanStatistic();
6702 
6703     local_statistic.collect(allocator);
6704     global_statistic.sum_up(local_statistic);
6705 
6706     if (TraceLinearScanLevel > 2) {
6707       local_statistic.print("current local statistic");
6708     }
6709   }
6710 }
6711 
6712 
6713 // Implementation of LinearScanTimers
6714 
6715 LinearScanTimers::LinearScanTimers() {
6716   for (int i = 0; i < number_of_timers; i++) {
6717     timer(i)->reset();
6718   }
6719 }
6720 
6721 const char* LinearScanTimers::timer_name(int idx) {
6722   switch (idx) {
6723     case timer_do_nothing:               return "Nothing (Time Check)";
6724     case timer_number_instructions:      return "Number Instructions";
6725     case timer_compute_local_live_sets:  return "Local Live Sets";
6726     case timer_compute_global_live_sets: return "Global Live Sets";
6727     case timer_build_intervals:          return "Build Intervals";
6728     case timer_sort_intervals_before:    return "Sort Intervals Before";
6729     case timer_allocate_registers:       return "Allocate Registers";
6730     case timer_resolve_data_flow:        return "Resolve Data Flow";
6731     case timer_sort_intervals_after:     return "Sort Intervals After";
6732     case timer_eliminate_spill_moves:    return "Spill optimization";
6733     case timer_assign_reg_num:           return "Assign Reg Num";
6734     case timer_allocate_fpu_stack:       return "Allocate FPU Stack";
6735     case timer_optimize_lir:             return "Optimize LIR";
6736     default: ShouldNotReachHere();       return "";
6737   }
6738 }
6739 
6740 void LinearScanTimers::begin_method() {
6741   if (TimeEachLinearScan) {
6742     // reset all timers to measure only current method
6743     for (int i = 0; i < number_of_timers; i++) {
6744       timer(i)->reset();
6745     }
6746   }
6747 }
6748 
6749 void LinearScanTimers::end_method(LinearScan* allocator) {
6750   if (TimeEachLinearScan) {
6751 
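    // the do-nothing timer approximates the cost of starting and stopping a
    // timer; it is subtracted from every other timer as a correction value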
6752     double c = timer(timer_do_nothing)->seconds();
6753     double total = 0;
6754     for (int i = 1; i < number_of_timers; i++) {
6755       total += timer(i)->seconds() - c;
6756     }
6757 
6758     if (total >= 0.0005) {
6759       // print all information in one line for automatic processing
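      // fields: method name, bytecode size, approx. number of LIR instructions
      // (last instruction id / 2), blocks, virtual registers, intervals, calls,
      // loops, total time and the percentage of the total spent in each timer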
6760       tty->print("@"); allocator->compilation()->method()->print_name();
6761 
6762       tty->print("@ %d ", allocator->compilation()->method()->code_size());
6763       tty->print("@ %d ", allocator->block_at(allocator->block_count() - 1)->last_lir_instruction_id() / 2);
6764       tty->print("@ %d ", allocator->block_count());
6765       tty->print("@ %d ", allocator->num_virtual_regs());
6766       tty->print("@ %d ", allocator->interval_count());
6767       tty->print("@ %d ", allocator->_num_calls);
6768       tty->print("@ %d ", allocator->num_loops());
6769 
6770       tty->print("@ %6.6f ", total);
6771       for (int i = 1; i < number_of_timers; i++) {
6772         tty->print("@ %4.1f ", ((timer(i)->seconds() - c) / total) * 100);
6773       }
6774       tty->cr();
6775     }
6776   }
6777 }
6778 
6779 void LinearScanTimers::print(double total_time) {
6780   if (TimeLinearScan) {
6781     // correction value: sum of dummy-timer that only measures the time that
6782     // is necessary to start and stop itself
6783     double c = timer(timer_do_nothing)->seconds();
6784 
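    // for each timer: raw time and percentage of the total compilation time,
    // followed by the same values with the start/stop overhead subtracted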
6785     for (int i = 0; i < number_of_timers; i++) {
6786       double t = timer(i)->seconds();
6787       tty->print_cr("    %25s: %6.3f s (%4.1f%%)  corrected: %6.3f s (%4.1f%%)", timer_name(i), t, (t / total_time) * 100.0, t - c, (t - c) / (total_time - 2 * number_of_timers * c) * 100);
6788     }
6789   }
6790 }
6791 
6792 #endif // #ifndef PRODUCT
6793