1 /*
2  * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  */
23 
24 
25 package org.graalvm.compiler.lir.alloc.trace.lsra;
26 
27 import static jdk.vm.ci.code.CodeUtil.isOdd;
28 import static jdk.vm.ci.code.ValueUtil.asRegister;
29 import static jdk.vm.ci.code.ValueUtil.isRegister;
30 import static org.graalvm.compiler.lir.LIRValueUtil.isStackSlotValue;
31 import static org.graalvm.compiler.lir.LIRValueUtil.isVariable;
32 
33 import java.util.ArrayList;
34 import java.util.Arrays;
35 import java.util.BitSet;
36 
37 import org.graalvm.compiler.core.common.alloc.RegisterAllocationConfig.AllocatableRegisters;
38 import org.graalvm.compiler.core.common.cfg.AbstractBlockBase;
39 import org.graalvm.compiler.core.common.util.Util;
40 import org.graalvm.compiler.debug.DebugContext;
41 import org.graalvm.compiler.debug.GraalError;
42 import org.graalvm.compiler.debug.Indent;
43 import org.graalvm.compiler.lir.LIRInstruction;
44 import org.graalvm.compiler.lir.LIRValueUtil;
45 import org.graalvm.compiler.lir.StandardOp.BlockEndOp;
46 import org.graalvm.compiler.lir.StandardOp.LabelOp;
47 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
48 import org.graalvm.compiler.lir.alloc.OutOfRegistersException;
49 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.RegisterPriority;
50 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.SpillState;
51 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceInterval.State;
52 import org.graalvm.compiler.lir.alloc.trace.lsra.TraceLinearScanPhase.TraceLinearScan;
53 import org.graalvm.compiler.lir.ssa.SSAUtil;
54 
55 import jdk.vm.ci.code.Register;
56 import jdk.vm.ci.meta.Value;
57 
58 /**
59  * Walks the sorted interval lists of a trace, assigning a register to each interval and
 * splitting or spilling intervals when no free register is available.
 */
60 final class TraceLinearScanWalker {
61 
62     /**
63      * Adds an interval to a list sorted by {@linkplain FixedInterval#currentFrom() current from}
64      * positions.
65      *
66      * @param list the list
67      * @param interval the interval to add
68      * @return the new head of the list
69      */
70     private static FixedInterval addToListSortedByCurrentFromPositions(FixedInterval list, FixedInterval interval) {
71         FixedInterval prev = null;
72         FixedInterval cur = list;
73         while (cur.currentFrom() < interval.currentFrom()) {
74             prev = cur;
75             cur = cur.next;
76         }
77         FixedInterval result = list;
78         if (prev == null) {
79             // add to head of list
80             result = interval;
81         } else {
82             // add before 'cur'
83             prev.next = interval;
84         }
85         interval.next = cur;
86         return result;
87     }
88 
89     /**
90      * Adds an interval to a list sorted by {@linkplain TraceInterval#from() from}
91      * positions.
92      *
93      * @param list the list
94      * @param interval the interval to add
95      * @return the new head of the list
96      */
97     private static TraceInterval addToListSortedByFromPositions(TraceInterval list, TraceInterval interval) {
98         TraceInterval prev = null;
99         TraceInterval cur = list;
100         while (cur.from() < interval.from()) {
101             prev = cur;
102             cur = cur.next;
103         }
104         TraceInterval result = list;
105         if (prev == null) {
106             // add to head of list
107             result = interval;
108         } else {
109             // add before 'cur'
110             prev.next = interval;
111         }
112         interval.next = cur;
113         return result;
114     }
115 
116     /**
117      * Adds an interval to a list sorted by {@linkplain TraceInterval#from() start} positions and
118      * {@linkplain TraceInterval#firstUsage(RegisterPriority) first usage} positions.
119      *
120      * @param list the list
121      * @param interval the interval to add
122      * @return the new head of the list
123      */
124     private static TraceInterval addToListSortedByStartAndUsePositions(TraceInterval list, TraceInterval interval) {
125         TraceInterval newHead = list;
126         TraceInterval prev = null;
127         TraceInterval cur = newHead;
128         while (cur.from() < interval.from() || (cur.from() == interval.from() && cur.firstUsage(RegisterPriority.None) < interval.firstUsage(RegisterPriority.None))) {
129             prev = cur;
130             cur = cur.next;
131         }
132         if (prev == null) {
133             newHead = interval;
134         } else {
135             prev.next = interval;
136         }
137         interval.next = cur;
138         return newHead;
139     }
140 
141     /**
142      * Removes an interval from a list.
143      *
144      * @param list the list
145      * @param interval the interval to remove
146      * @return the new head of the list
147      */
148     private static TraceInterval removeAny(TraceInterval list, TraceInterval interval) {
149         TraceInterval newHead = list;
150         TraceInterval prev = null;
151         TraceInterval cur = newHead;
152         while (cur != interval) {
153             assert cur != null && cur != TraceInterval.EndMarker : "interval has not been found in list: " + interval;
154             prev = cur;
155             cur = cur.next;
156         }
157         if (prev == null) {
158             newHead = cur.next;
159         } else {
160             prev.next = cur.next;
161         }
162         return newHead;
163     }
164 
165     private Register[] availableRegs;
166 
167     private final int[] usePos;
168     private final int[] blockPos;
169     private final BitSet isInMemory;
170 
171     private final ArrayList<TraceInterval>[] spillIntervals;
172 
173     private TraceLocalMoveResolver moveResolver; // for ordering spill moves
174 
175     private int minReg;
176 
177     private int maxReg;
178 
179     private final TraceLinearScan allocator;
180     private final DebugContext debug;
181 
182     /**
183      * Sorted list of intervals, not live before the current position.
184      */
185     private TraceInterval unhandledAnyList;
186 
187     /**
188      * Sorted list of intervals, live at the current position.
189      */
190     private TraceInterval activeAnyList;
191 
192     private FixedInterval activeFixedList;
193 
194     /**
195      * Sorted list of intervals in a lifetime hole at the current position.
196      */
197     private FixedInterval inactiveFixedList;
198 
199     /**
200      * The current position (intercept point through the intervals).
201      */
202     private int currentPosition;
203 
204     /**
205      * Only 10% of the lists in {@link #spillIntervals} are actually used. But when they are used,
206      * they can grow quite long. The maximum length observed was 45 (all numbers taken from a
207      * bootstrap run of Graal). Therefore, we initialize {@link #spillIntervals} with this marker
208      * value, and allocate a "real" list only on demand in {@link #setUsePos}.
209      */
210     private static final ArrayList<TraceInterval> EMPTY_LIST = new ArrayList<>(0);
211 
212     // accessors mapped to same functions in class LinearScan
213     private int blockCount() {
214         return allocator.blockCount();
215     }
216 
217     private AbstractBlockBase<?> blockAt(int idx) {
218         return allocator.blockAt(idx);
219     }
220 
221     @SuppressWarnings("unused")
222     private AbstractBlockBase<?> blockOfOpWithId(int opId) {
223         return allocator.blockForId(opId);
224     }
225 
226     TraceLinearScanWalker(TraceLinearScan allocator, FixedInterval unhandledFixedFirst, TraceInterval unhandledAnyFirst) {
227         this.allocator = allocator;
228         this.debug = allocator.getDebug();
229 
230         unhandledAnyList = unhandledAnyFirst;
231         activeAnyList = TraceInterval.EndMarker;
232         activeFixedList = FixedInterval.EndMarker;
233         // we don't need a separate unhandled list for fixed.
234         inactiveFixedList = unhandledFixedFirst;
235         currentPosition = -1;
236 
237         moveResolver = allocator.createMoveResolver();
238         int numRegs = allocator.getRegisters().size();
239         spillIntervals = Util.uncheckedCast(new ArrayList<?>[numRegs]);
240         for (int i = 0; i < numRegs; i++) {
241             spillIntervals[i] = EMPTY_LIST;
242         }
243         usePos = new int[numRegs];
244         blockPos = new int[numRegs];
245         isInMemory = new BitSet(numRegs);
246     }
247 
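    /** Resets the per-register use positions (and, unless {@code onlyProcessUsePos} is set, the block positions and spill bookkeeping) for all allocatable registers. */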
248     private void initUseLists(boolean onlyProcessUsePos) {
249         for (Register register : availableRegs) {
250             int i = register.number;
251             usePos[i] = Integer.MAX_VALUE;
252 
253             if (!onlyProcessUsePos) {
254                 blockPos[i] = Integer.MAX_VALUE;
255                 spillIntervals[i].clear();
256                 isInMemory.clear(i);
257             }
258         }
259     }
260 
261     private int maxRegisterNumber() {
262         return maxReg;
263     }
264 
265     private int minRegisterNumber() {
266         return minReg;
267     }
268 
269     private boolean isRegisterInRange(int reg) {
270         return reg >= minRegisterNumber() && reg <= maxRegisterNumber();
271     }
272 
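    /** Marks the register assigned to {@code i} as unavailable by setting its use position to 0. */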
273     private void excludeFromUse(IntervalHint i) {
274         Value location = i.location();
275         int i1 = asRegister(location).number;
276         if (isRegisterInRange(i1)) {
277             usePos[i1] = 0;
278         }
279     }
280 
281     private void setUsePos(TraceInterval interval, int usePos, boolean onlyProcessUsePos) {
282         if (usePos != -1) {
283             assert usePos != 0 : "must use excludeFromUse to set usePos to 0";
284             int i = asRegister(interval.location()).number;
285             if (isRegisterInRange(i)) {
286                 if (this.usePos[i] > usePos) {
287                     this.usePos[i] = usePos;
288                 }
289                 if (!onlyProcessUsePos) {
290                     ArrayList<TraceInterval> list = spillIntervals[i];
291                     if (list == EMPTY_LIST) {
292                         list = new ArrayList<>(2);
293                         spillIntervals[i] = list;
294                     }
295                     list.add(interval);
296                     // set is in memory flag
297                     if (interval.inMemoryAt(currentPosition)) {
298                         isInMemory.set(i);
299                     }
300                 }
301             }
302         }
303     }
304 
305     private void setUsePos(FixedInterval interval, int usePos, boolean onlyProcessUsePos) {
306         assert onlyProcessUsePos;
307         if (usePos != -1) {
308             assert usePos != 0 : "must use excludeFromUse to set usePos to 0";
309             int i = asRegister(interval.location()).number;
310             if (isRegisterInRange(i)) {
311                 if (this.usePos[i] > usePos) {
312                     this.usePos[i] = usePos;
313                 }
314             }
315         }
316     }
317 
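    /** Lowers the block position (and use position) of the register assigned to {@code i} to {@code blockPos} if they are currently larger. */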
318     private void setBlockPos(IntervalHint i, int blockPos) {
319         if (blockPos != -1) {
320             int reg = asRegister(i.location()).number;
321             if (isRegisterInRange(reg)) {
322                 if (this.blockPos[reg] > blockPos) {
323                     this.blockPos[reg] = blockPos;
324                 }
325                 if (usePos[reg] > blockPos) {
326                     usePos[reg] = blockPos;
327                 }
328             }
329         }
330     }
331 
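    /** Excludes the registers of all active fixed intervals from allocation. */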
332     private void freeExcludeActiveFixed() {
333         FixedInterval interval = activeFixedList;
334         while (interval != FixedInterval.EndMarker) {
335             assert isRegister(interval.location()) : "active interval must have a register assigned";
336             excludeFromUse(interval);
337             interval = interval.next;
338         }
339     }
340 
341     private void freeExcludeActiveAny() {
342         TraceInterval interval = activeAnyList;
343         while (interval != TraceInterval.EndMarker) {
344             assert isRegister(interval.location()) : "active interval must have a register assigned";
345             excludeFromUse(interval);
346             interval = interval.next;
347         }
348     }
349 
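    /** Records, for each inactive fixed interval, how long its register remains free for {@code current}: either the fixed interval's start or the first intersection with {@code current}. */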
350     private void freeCollectInactiveFixed(TraceInterval current) {
351         FixedInterval interval = inactiveFixedList;
352         while (interval != FixedInterval.EndMarker) {
353             if (current.to() <= interval.from()) {
354                 assert interval.intersectsAt(current) == -1 : "must not intersect";
355                 setUsePos(interval, interval.from(), true);
356             } else {
357                 setUsePos(interval, interval.currentIntersectsAt(current), true);
358             }
359             interval = interval.next;
360         }
361     }
362 
363     private void spillExcludeActiveFixed() {
364         FixedInterval interval = activeFixedList;
365         while (interval != FixedInterval.EndMarker) {
366             excludeFromUse(interval);
367             interval = interval.next;
368         }
369     }
370 
371     private void spillBlockInactiveFixed(TraceInterval current) {
372         FixedInterval interval = inactiveFixedList;
373         while (interval != FixedInterval.EndMarker) {
374             if (current.to() > interval.currentFrom()) {
375                 setBlockPos(interval, interval.currentIntersectsAt(current));
376             } else {
377                 assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect";
378             }
379 
380             interval = interval.next;
381         }
382     }
383 
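    /** Records, for every active interval, its next use of at least {@code registerPriority} (bounded by the interval end) and registers it as a spill candidate. */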
384     private void spillCollectActiveAny(RegisterPriority registerPriority) {
385         TraceInterval interval = activeAnyList;
386         while (interval != TraceInterval.EndMarker) {
387             setUsePos(interval, Math.min(interval.nextUsage(registerPriority, currentPosition), interval.to()), false);
388             interval = interval.next;
389         }
390     }
391 
392     @SuppressWarnings("unused")
393     private int insertIdAtBasicBlockBoundary(int opId) {
394         assert allocator.isBlockBegin(opId) : "Not a block begin: " + opId;
395         assert allocator.instructionForId(opId) instanceof LabelOp;
396         assert allocator.instructionForId(opId - 2) instanceof BlockEndOp;
397 
398         AbstractBlockBase<?> toBlock = allocator.blockForId(opId);
399         AbstractBlockBase<?> fromBlock = allocator.blockForId(opId - 2);
400 
401         if (fromBlock.getSuccessorCount() == 1) {
402             // insert move in predecessor
403             return opId - 2;
404         }
405         assert toBlock.getPredecessorCount() == 1 : String.format("Critical Edge? %s->%s", fromBlock, toBlock);
406         // insert move in successor
407         return opId + 2;
408     }
409 
410     private void insertMove(int operandId, TraceInterval srcIt, TraceInterval dstIt) {
411         // output all moves here. When source and target are equal, the move is
412         // optimized away later in assignRegNums
413 
414         int opId = (operandId + 1) & ~1;
415         AbstractBlockBase<?> opBlock = allocator.blockForId(opId);
416         assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary";
417 
418         // calculate index of instruction inside instruction list of current block
419         // the minimal index (for a block with no spill moves) can be calculated because the
420         // numbering of instructions is known.
421         // When the block already contains spill moves, the index must be increased until the
422         // correct index is reached.
423         ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(opBlock);
424         int index = (opId - instructions.get(0).id()) >> 1;
425         assert instructions.get(index).id() <= opId : "error in calculation";
426 
427         while (instructions.get(index).id() != opId) {
428             index++;
429             assert 0 <= index && index < instructions.size() : "index out of bounds";
430         }
431         assert 1 <= index && index < instructions.size() : "index out of bounds";
432         assert instructions.get(index).id() == opId : "error in calculation";
433 
434         // insert new instruction before instruction at position index
435         moveResolver.moveInsertPosition(instructions, index);
436         moveResolver.addMapping(srcIt, dstIt);
437     }
438 
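    /** Chooses a split position at a block boundary between {@code minBlock} and {@code maxBlock}, preferring the end of the block with the lowest probability. */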
439     private int findOptimalSplitPos(AbstractBlockBase<?> minBlock, AbstractBlockBase<?> maxBlock, int maxSplitPos) {
440         int fromBlockNr = minBlock.getLinearScanNumber();
441         int toBlockNr = maxBlock.getLinearScanNumber();
442 
443         assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range";
444         assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range";
445         assert fromBlockNr < toBlockNr : "must cross block boundary";
446 
447         // Try to split at the end of maxBlock. If this would be after
448         // maxSplitPos, then use the beginning of maxBlock.
449         int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) + 2;
450         if (optimalSplitPos > maxSplitPos) {
451             optimalSplitPos = allocator.getFirstLirInstructionId(maxBlock);
452         }
453 
454         // minimal block probability
455         double minProbability = maxBlock.probability();
456         for (int i = toBlockNr - 1; i >= fromBlockNr; i--) {
457             AbstractBlockBase<?> cur = blockAt(i);
458 
459             if (cur.probability() < minProbability) {
460                 // Block with lower probability found. Split at the end of this block.
461                 minProbability = cur.probability();
462                 optimalSplitPos = allocator.getLastLirInstructionId(cur) + 2;
463             }
464         }
465         assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) : "algorithm must move split pos to block boundary";
466 
467         return optimalSplitPos;
468     }
469 
470     @SuppressWarnings({"unused"})
471     private int findOptimalSplitPos(TraceInterval interval, int minSplitPos, int maxSplitPos, boolean doLoopOptimization) {
472         int optimalSplitPos = findOptimalSplitPos0(minSplitPos, maxSplitPos);
473         if (debug.isLogEnabled()) {
474             debug.log("optimal split position: %d", optimalSplitPos);
475         }
476         return optimalSplitPos;
477     }
478 
479     private int findOptimalSplitPos0(int minSplitPos, int maxSplitPos) {
480         if (minSplitPos == maxSplitPos) {
481             // trivial case, no optimization of split position possible
482             if (debug.isLogEnabled()) {
483                 debug.log("min-pos and max-pos are equal, no optimization possible");
484             }
485             return minSplitPos;
486 
487         }
488         assert minSplitPos < maxSplitPos : "must be true then";
489         assert minSplitPos > 0 : "cannot access minSplitPos - 1 otherwise";
490 
491         // reason for using minSplitPos - 1: when the minimal split pos is exactly at the
492         // beginning of a block, then minSplitPos is also a possible split position.
493         // Use the block before as minBlock, because then minBlock.lastLirInstructionId() + 2 ==
494         // minSplitPos
495         AbstractBlockBase<?> minBlock = allocator.blockForId(minSplitPos - 1);
496 
497         // reason for using maxSplitPos - 1: otherwise there would be an assert on failure
498         // when an interval ends at the end of the last block of the method
499         // (in this case, maxSplitPos == allocator().maxLirOpId() + 2, and there is no
500         // block at this opId)
501         AbstractBlockBase<?> maxBlock = allocator.blockForId(maxSplitPos - 1);
502 
503         assert minBlock.getLinearScanNumber() <= maxBlock.getLinearScanNumber() : "invalid order";
504         if (minBlock == maxBlock) {
505             // split position cannot be moved to a block boundary, so split as late as possible
506             if (debug.isLogEnabled()) {
507                 debug.log("cannot move split pos to block boundary because minPos and maxPos are in same block");
508             }
509             return maxSplitPos;
510 
511         }
512         // search optimal block boundary between minSplitPos and maxSplitPos
513         if (debug.isLogEnabled()) {
514             debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId());
515         }
516 
517         return findOptimalSplitPos(minBlock, maxBlock, maxSplitPos);
518     }
519 
520     // split an interval at the optimal position between minSplitPos and
521     // maxSplitPos in two parts:
522     // 1) the left part already has a location assigned
523     // 2) the right part is sorted into the unhandled list
524     @SuppressWarnings("try")
525     private void splitBeforeUsage(TraceInterval interval, int minSplitPos, int maxSplitPos) {
526 
527         try (Indent indent = debug.logAndIndent("splitting interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) {
528 
529             assert interval.from() < minSplitPos : "cannot split at start of interval";
530             assert currentPosition < minSplitPos : "cannot split before current position";
531             assert minSplitPos <= maxSplitPos : "invalid order";
532             assert maxSplitPos <= interval.to() : "cannot split after end of interval";
533 
534             final int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true);
535 
536             if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) {
537                 // the split position would be just before the end of the interval,
538                 // so no split is necessary at all
539                 if (debug.isLogEnabled()) {
540                     debug.log("no split necessary because optimal split position is at end of interval");
541                 }
542                 return;
543             }
544             // must calculate this before the actual split is performed and before split position is
545             // moved to odd opId
546             final int optimalSplitPosFinal;
547             boolean blockBegin = allocator.isBlockBegin(optimalSplitPos);
548             assert blockBegin || !allocator.isBlockBegin(optimalSplitPos - 1);
549             boolean moveNecessary = !blockBegin;
550             if (blockBegin) {
551                 optimalSplitPosFinal = optimalSplitPos;
552             } else {
553                 // move position before actual instruction (odd opId)
554                 optimalSplitPosFinal = (optimalSplitPos - 1) | 1;
555             }
556 
557             // TODO (je) better define what minSplitPos and maxSplitPos mean.
558             assert minSplitPos <= optimalSplitPosFinal && optimalSplitPosFinal <= maxSplitPos || minSplitPos == maxSplitPos && optimalSplitPosFinal == minSplitPos - 1 : "out of range";
559             assert optimalSplitPosFinal <= interval.to() : "cannot split after end of interval";
560             assert optimalSplitPosFinal > interval.from() : "cannot split at start of interval";
561 
562             if (debug.isLogEnabled()) {
563                 debug.log("splitting at position %d", optimalSplitPosFinal);
564             }
565             assert optimalSplitPosFinal > currentPosition : "Can not split interval " + interval + " at current position: " + currentPosition;
566 
567             assert blockBegin || ((optimalSplitPosFinal & 1) == 1) : "split pos must be odd when not on block boundary";
568             assert !blockBegin || ((optimalSplitPosFinal & 1) == 0) : "split pos must be even on block boundary";
569 
570             // TODO (je) duplicate code. try to fold
571             if (optimalSplitPosFinal == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) {
572                 // the split position would be just before the end of the interval,
573                 // so no split is necessary at all
574                 if (debug.isLogEnabled()) {
575                     debug.log("no split necessary because optimal split position is at end of interval");
576                 }
577                 return;
578             }
579             TraceInterval splitPart = interval.split(optimalSplitPosFinal, allocator);
580 
581             splitPart.setInsertMoveWhenActivated(moveNecessary);
582 
583             assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position";
584             unhandledAnyList = TraceLinearScanWalker.addToListSortedByStartAndUsePositions(unhandledAnyList, splitPart);
585 
586             if (debug.isLogEnabled()) {
587                 debug.log("left interval  %s: %s", moveNecessary ? "      " : "", interval.logString());
588                 debug.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString());
589             }
590         }
591     }
592 
593     // split an interval at the optimal position between minSplitPos and
594     // maxSplitPos in two parts:
595     // 1) the left part already has a location assigned
596     // 2) the right part is always on the stack and therefore ignored in further processing
597     @SuppressWarnings("try")
598     private void splitForSpilling(TraceInterval interval) {
599         // calculate allowed range of splitting position
600         int maxSplitPos = currentPosition;
601         int previousUsage = interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos);
602         if (previousUsage == currentPosition) {
603             /*
604              * If there is a usage with ShouldHaveRegister priority at the current position fall
605              * back to MustHaveRegister priority. This only happens if register priority was
606              * downgraded to MustHaveRegister in #allocLockedRegister.
607              */
608             previousUsage = interval.previousUsage(RegisterPriority.MustHaveRegister, maxSplitPos);
609         }
610         int minSplitPos = Math.max(previousUsage + 1, interval.from());
611 
612         try (Indent indent = debug.logAndIndent("splitting and spilling interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) {
613 
614             assert interval.from() <= minSplitPos : "cannot split before start of interval";
615             assert minSplitPos <= maxSplitPos : "invalid order";
616             assert maxSplitPos < interval.to() : "cannot split at end of interval";
617             assert currentPosition < interval.to() : "interval must not end before current position";
618 
619             if (minSplitPos == interval.from()) {
620                 // the whole interval is never used, so spill it entirely to memory
621 
622                 try (Indent indent2 = debug.logAndIndent("spilling entire interval because split pos is at beginning of interval (use positions: %d)", interval.numUsePos())) {
623 
624                     assert interval.firstUsage(RegisterPriority.MustHaveRegister) > currentPosition : String.format("interval %s must not have use position before currentPosition %d", interval,
625                                     currentPosition);
626 
627                     allocator.assignSpillSlot(interval);
628                     handleSpillSlot(interval);
629                     changeSpillState(interval, minSplitPos);
630 
631                     // Also kick parent intervals out of their register to memory when they have no use
632                     // position. This avoids a short interval in a register surrounded by intervals in
633                     // memory, and thereby avoids useless moves from memory to register and back.
634                     TraceInterval parent = interval;
635                     while (parent != null && parent.isSplitChild()) {
636                         parent = parent.getSplitChildBeforeOpId(parent.from());
637 
638                         if (isRegister(parent.location())) {
639                             if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) {
640                                 // parent is never used, so kick it out of its assigned register
641                                 if (debug.isLogEnabled()) {
642                                     debug.log("kicking interval %d out of its register because it is never used", parent.operandNumber);
643                                 }
644                                 allocator.assignSpillSlot(parent);
645                                 handleSpillSlot(parent);
646                             } else {
647                                 // do not go further back because the register is actually used by
648                                 // the interval
649                                 parent = null;
650                             }
651                         }
652                     }
653                 }
654 
655             } else {
656                 // search optimal split pos, split interval and spill only the right hand part
657                 int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, false);
658 
659                 assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range";
660                 assert optimalSplitPos < interval.to() : "cannot split at end of interval";
661                 assert optimalSplitPos >= interval.from() : "cannot split before start of interval";
662 
663                 if (!allocator.isBlockBegin(optimalSplitPos)) {
664                     // move position before actual instruction (odd opId)
665                     optimalSplitPos = (optimalSplitPos - 1) | 1;
666                 }
667 
668                 try (Indent indent2 = debug.logAndIndent("splitting at position %d", optimalSplitPos)) {
669                     assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary";
670                     assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary";
671 
672                     TraceInterval spilledPart = interval.split(optimalSplitPos, allocator);
673                     allocator.assignSpillSlot(spilledPart);
674                     handleSpillSlot(spilledPart);
675                     changeSpillState(spilledPart, optimalSplitPos);
676 
677                     if (!allocator.isBlockBegin(optimalSplitPos)) {
678                         if (debug.isLogEnabled()) {
679                             debug.log("inserting move from interval %s to %s", interval, spilledPart);
680                         }
681                         insertMove(optimalSplitPos, interval, spilledPart);
682                     } else {
683                         if (debug.isLogEnabled()) {
684                             debug.log("no need to insert move. done by data-flow resolution");
685                         }
686                     }
687 
688                     // the currentSplitChild is needed later when moves are inserted for reloading
689                     assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild";
690                     spilledPart.makeCurrentSplitChild();
691 
692                     if (debug.isLogEnabled()) {
693                         debug.log("left interval: %s", interval.logString());
694                         debug.log("spilled interval   : %s", spilledPart.logString());
695                     }
696                 }
697             }
698         }
699     }
700 
701     /**
702      * Change spill state of an interval.
703      *
704      * Note: called during register allocation.
705      *
706      * @param spillPos position of the spill
707      */
708     private void changeSpillState(TraceInterval interval, int spillPos) {
709         if (TraceLinearScanPhase.Options.LIROptTraceRAEliminateSpillMoves.getValue(allocator.getOptions())) {
710             switch (interval.spillState()) {
711                 case NoSpillStore:
712                     final int minSpillPos = calculateMinSpillPos(interval.spillDefinitionPos(), spillPos);
713                     final int maxSpillPos = calculateMaxSpillPos(minSpillPos, spillPos);
714 
715                     final int optimalSpillPos = findOptimalSpillPos(minSpillPos, maxSpillPos);
716 
717                     /* Cannot spill at block begin since it interferes with move resolution. */
718                     assert isNotBlockBeginOrMerge(optimalSpillPos) : "Spill pos at block begin: " + optimalSpillPos;
719                     assert !allocator.isBlockEnd(optimalSpillPos) : "Spill pos at block end: " + optimalSpillPos;
720                     assert (optimalSpillPos & 1) == 0 : "Spill pos must be even " + optimalSpillPos;
721 
722                     interval.setSpillDefinitionPos(optimalSpillPos);
723                     interval.setSpillState(SpillState.SpillStore);
724                     break;
725                 case SpillStore:
726                 case StartInMemory:
727                 case NoOptimization:
728                 case NoDefinitionFound:
729                     // nothing to do
730                     break;
731 
732                 default:
733                     throw GraalError.shouldNotReachHere("other states not allowed at this time");
734             }
735         } else {
736             interval.setSpillState(SpillState.NoOptimization);
737         }
738     }
739 
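    /** Computes the earliest valid spill position at or after the spill definition position, skipping block boundaries (other than merges) that would interfere with move resolution. */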
740     private int calculateMinSpillPos(int spillDefinitionPos, int spillPos) {
741         int spillDefinitionPosEven = spillDefinitionPos & ~1;
742         if (spillDefinitionPosEven == 0 || !allocator.isBlockBegin(spillDefinitionPosEven) || spillDefinitionPos == spillPos) {
743             assert !allocator.isBlockEnd(spillDefinitionPosEven) : "Definition at block end? " + spillDefinitionPos;
744             return spillDefinitionPos;
745         }
746         assert allocator.isBlockBegin(spillDefinitionPosEven);
747         if (SSAUtil.isMerge(allocator.blockForId(spillDefinitionPos))) {
748             /* Spills at a merge are OK since there will be no resolution moves. */
749             return spillDefinitionPos;
750         }
751         int minSpillPos = spillDefinitionPosEven + 2;
752         while (allocator.isBlockEnd(minSpillPos)) {
753             // +2 is block begin, +4 is the instruction afterwards
754             minSpillPos += 4;
755         }
756         assert minSpillPos <= spillPos : String.format("No minSpillPos found. defPos: %d, spillPos: %d, minSpillPos: %d", spillDefinitionPos, spillPos, minSpillPos);
757         return minSpillPos;
758     }
759 
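    /** Computes the latest valid spill position at or before {@code spillPos}, moving away from block begins and block ends. */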
760     private int calculateMaxSpillPos(final int minSpillPos, int spillPos) {
761         int spillPosEven = spillPos & ~1;
762         if (spillPosEven == 0) {
763             return spillPos;
764         }
765         if ((minSpillPos & ~1) == spillPosEven) {
766             assert isNotBlockBeginOrMerge(spillPos);
767             return spillPos;
768         }
769         int maxSpillPos;
770         /* Move away from block end. */
771         if (allocator.isBlockEnd(spillPosEven)) {
772             /* Block end. Use instruction before. */
773             maxSpillPos = spillPosEven - 2;
774         } else if (allocator.isBlockBegin(spillPosEven)) {
775             /* Block begin. Use instruction before previous block end. */
776             maxSpillPos = spillPosEven - 4;
777         } else {
778             return spillPos;
779         }
780         assert !allocator.isBlockEnd(maxSpillPos) : "Can no longer be a block end! " + maxSpillPos;
781 
782         /* Skip block begins. */
783         while (allocator.isBlockBegin(maxSpillPos) && maxSpillPos > minSpillPos) {
784             // -2 is block end, -4 is the instruction before
785             maxSpillPos -= 4;
786         }
787         assert minSpillPos <= maxSpillPos;
788         return maxSpillPos;
789     }
790 
791     private boolean isNotBlockBeginOrMerge(int spillPos) {
792         int spillPosEven = spillPos & ~1;
793         return spillPosEven == 0 || !allocator.isBlockBegin(spillPosEven) || SSAUtil.isMerge(allocator.blockForId(spillPosEven));
794     }
795 
796     /**
797      * @param minSpillPos minimal spill position
798      * @param maxSpillPos maximal spill position
799      */
800     private int findOptimalSpillPos(int minSpillPos, int maxSpillPos) {
801         int optimalSpillPos = findOptimalSpillPos0(minSpillPos, maxSpillPos) & (~1);
802         if (debug.isLogEnabled()) {
803             debug.log("optimal spill position: %d", optimalSpillPos);
804         }
805         return optimalSpillPos;
806     }
807 
808     private int findOptimalSpillPos0(int minSpillPos, int maxSpillPos) {
809         if (minSpillPos == maxSpillPos) {
810             // trivial case, no optimization of split position possible
811             if (debug.isLogEnabled()) {
812                 debug.log("min-pos and max-pos are equal, no optimization possible");
813             }
814             return minSpillPos;
815 
816         }
817         assert minSpillPos < maxSpillPos : "must be true then";
818         assert minSpillPos >= 0 : "cannot access minSplitPos - 1 otherwise";
819 
820         AbstractBlockBase<?> minBlock = allocator.blockForId(minSpillPos);
821         AbstractBlockBase<?> maxBlock = allocator.blockForId(maxSpillPos);
822 
823         assert minBlock.getLinearScanNumber() <= maxBlock.getLinearScanNumber() : "invalid order";
824         if (minBlock == maxBlock) {
825             // split position cannot be moved to a block boundary, so split as late as possible
826             if (debug.isLogEnabled()) {
827                 debug.log("cannot move split pos to block boundary because minPos and maxPos are in same block");
828             }
829             return maxSpillPos;
830 
831         }
832         // search optimal block boundary between minSpillPos and maxSpillPos
833         if (debug.isLogEnabled()) {
834             debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId());
835         }
836 
837         // currently using the same heuristic as for splitting
838         return findOptimalSpillPos(minBlock, maxBlock, maxSpillPos);
839     }
840 
841     private int findOptimalSpillPos(AbstractBlockBase<?> minBlock, AbstractBlockBase<?> maxBlock, int maxSplitPos) {
842         int fromBlockNr = minBlock.getLinearScanNumber();
843         int toBlockNr = maxBlock.getLinearScanNumber();
844 
845         assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range";
846         assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range";
847         assert fromBlockNr < toBlockNr : "must cross block boundary";
848 
849         /*
850          * Try to split at end of maxBlock. We use last instruction -2 because we want to insert the
851          * move before the block end op. If this would be after maxSplitPos, then use the
852          * maxSplitPos.
853          */
854         int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) - 2;
855         if (optimalSplitPos > maxSplitPos) {
856             optimalSplitPos = maxSplitPos;
857         }
858 
859         // minimal block probability
860         double minProbability = maxBlock.probability();
861         for (int i = toBlockNr - 1; i >= fromBlockNr; i--) {
862             AbstractBlockBase<?> cur = blockAt(i);
863 
864             if (cur.probability() < minProbability) {
865                 // Block with lower probability found. Split at the end of this block.
866                 int opIdBeforeBlockEnd = allocator.getLastLirInstructionId(cur) - 2;
867                 if (allocator.getLIR().getLIRforBlock(cur).size() > 2) {
868                     minProbability = cur.probability();
869                     optimalSplitPos = opIdBeforeBlockEnd;
870                 } else {
871                     /*
872                      * Skip blocks with only LabelOp and BlockEndOp since they cause move ordering
873                      * problems.
874                      */
875                     assert allocator.isBlockBegin(opIdBeforeBlockEnd);
876                 }
877             }
878         }
879         assert optimalSplitPos > allocator.maxOpId() || optimalSplitPos == maxSplitPos || allocator.isBlockEnd(optimalSplitPos + 2) : "algorithm must move split pos to block boundary";
880         assert !allocator.isBlockBegin(optimalSplitPos);
881         return optimalSplitPos;
882     }
883 
884     /**
885      * This is called for every interval that is assigned to a stack slot.
886      */
887     private static void handleSpillSlot(TraceInterval interval) {
888         assert interval.location() != null && (interval.canMaterialize() || isStackSlotValue(interval.location())) : "interval not assigned to a stack slot " + interval;
889         // Do nothing. Stack slots are not processed in this implementation.
890     }
891 
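    /** Splits an interval that is currently on the stack before its first use that should have a register. */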
892     private void splitStackInterval(TraceInterval interval) {
893         int minSplitPos = currentPosition + 1;
894         int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to());
895 
896         splitBeforeUsage(interval, minSplitPos, maxSplitPos);
897     }
898 
899     private void splitWhenPartialRegisterAvailable(TraceInterval interval, int registerAvailableUntil) {
900         int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1);
901         splitBeforeUsage(interval, minSplitPos, registerAvailableUntil);
902     }
903 
904     private void splitAndSpillInterval(TraceInterval interval) {
905         int currentPos = currentPosition;
906         /*
907          * Search the position where the interval must have a register and split at the optimal
908          * position before. The newly created part is added to the unhandled list and will get a
909          * register when it is activated.
910          */
911         int minSplitPos = currentPos + 1;
912         int maxSplitPos = interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos);
913 
914         if (maxSplitPos <= interval.to()) {
915             splitBeforeUsage(interval, minSplitPos, maxSplitPos);
916         } else {
917             debug.log("No more usage, no need to split: %s", interval);
918         }
919 
920         assert interval.nextUsage(RegisterPriority.MustHaveRegister, currentPos) == Integer.MAX_VALUE : "the remaining part is spilled to stack and therefore has no register";
921         splitForSpilling(interval);
922     }
923 
924     @SuppressWarnings("try")
925     private boolean allocFreeRegister(TraceInterval interval) {
926         try (Indent indent = debug.logAndIndent("trying to find free register for %s", interval)) {
927 
928             initUseLists(true);
929             freeExcludeActiveFixed();
930             freeCollectInactiveFixed(interval);
931             freeExcludeActiveAny();
932             // freeCollectUnhandled(fixedKind, cur);
933 
934             // usePos contains the start of the next interval that has this register assigned
935             // (either as a fixed register or a normal allocated register in the past)
936             // only intervals overlapping with cur are processed, non-overlapping intervals can be
937             // ignored safely
938             if (debug.isLogEnabled()) {
939                 // Enable this logging to see all register states
940                 try (Indent indent2 = debug.logAndIndent("state of registers:")) {
941                     for (Register register : availableRegs) {
942                         int i = register.number;
943                         debug.log("reg %d (%s): usePos: %d", register.number, register, usePos[i]);
944                     }
945                 }
946             }
947 
948             Register hint = null;
949             IntervalHint locationHint = interval.locationHint(true);
950             if (locationHint != null && locationHint.location() != null && isRegister(locationHint.location())) {
951                 hint = asRegister(locationHint.location());
952                 if (debug.isLogEnabled()) {
953                     debug.log("hint register %3d (%4s) from interval %s", hint.number, hint, locationHint);
954                 }
955             }
956             assert interval.location() == null : "register already assigned to interval";
957 
958             // the register must be free at least until this position
959             int regNeededUntil = interval.from() + 1;
960             int intervalTo = interval.to();
961 
962             boolean needSplit = false;
963             int splitPos = -1;
964 
965             Register reg = null;
966             Register minFullReg = null;
967             Register maxPartialReg = null;
968 
969             for (Register availableReg : availableRegs) {
970                 int number = availableReg.number;
971                 if (usePos[number] >= intervalTo) {
972                     // this register is free for the full interval
973                     if (minFullReg == null || availableReg.equals(hint) || (usePos[number] < usePos[minFullReg.number] && !minFullReg.equals(hint))) {
974                         minFullReg = availableReg;
975                     }
976                 } else if (usePos[number] > regNeededUntil) {
977                     // this register is at least free until regNeededUntil
978                     if (maxPartialReg == null || availableReg.equals(hint) || (usePos[number] > usePos[maxPartialReg.number] && !maxPartialReg.equals(hint))) {
979                         maxPartialReg = availableReg;
980                     }
981                 }
982             }
983 
984             if (minFullReg != null) {
985                 reg = minFullReg;
986             } else if (maxPartialReg != null) {
987                 needSplit = true;
988                 reg = maxPartialReg;
989             } else {
990                 return false;
991             }
992 
993             splitPos = usePos[reg.number];
994             interval.assignLocation(reg.asValue(allocator.getKind(interval)));
995             if (debug.isLogEnabled()) {
996                 debug.log("selected register %d (%s)", reg.number, reg);
997             }
998 
999             assert splitPos > 0 : "invalid splitPos";
1000             if (needSplit) {
1001                 // register not available for full interval, so split it
1002                 splitWhenPartialRegisterAvailable(interval, splitPos);
1003             }
1004             // only return true if interval is completely assigned
1005             return true;
1006         }
1007     }
1008 
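    /** Splits and spills all intervals recorded in {@link #spillIntervals} for {@code reg} so that the register becomes available. */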
1009     private void splitAndSpillIntersectingIntervals(Register reg) {
1010         assert reg != null : "no register assigned";
1011 
1012         for (int i = 0; i < spillIntervals[reg.number].size(); i++) {
1013             TraceInterval interval = spillIntervals[reg.number].get(i);
1014             removeFromList(interval);
1015             splitAndSpillInterval(interval);
1016         }
1017     }
1018 
1019     // Split an Interval and spill it to memory so that cur can be placed in a register
1020     @SuppressWarnings("try")
1021     private void allocLockedRegister(TraceInterval interval) {
1022         try (Indent indent = debug.logAndIndent("alloc locked register: need to split and spill to get register for %s", interval)) {
1023 
1024             // the register must be free at least until this position
1025             int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister);
1026             int firstShouldHaveUsage = interval.firstUsage(RegisterPriority.ShouldHaveRegister);
1027             int regNeededUntil = Math.min(firstUsage, interval.from() + 1);
1028             int intervalTo = interval.to();
1029             assert regNeededUntil >= 0 && regNeededUntil < Integer.MAX_VALUE : "interval has no use";
1030 
1031             Register reg;
1032             Register ignore;
1033             /*
1034              * In the common case we don't spill registers that have _any_ use position that is
1035              * closer than the next use of the current interval, but if we can't spill the current
1036              * interval we weaken this strategy and also allow spilling of intervals that have only
1037              * non-mandatory requirements (no MustHaveRegister use position).
1038              */
1039             for (RegisterPriority registerPriority = RegisterPriority.LiveAtLoopEnd; true; registerPriority = RegisterPriority.MustHaveRegister) {
1040                 // collect current usage of registers
1041                 initUseLists(false);
1042                 spillExcludeActiveFixed();
1043                 // spillBlockUnhandledFixed(cur);
1044                 spillBlockInactiveFixed(interval);
1045                 spillCollectActiveAny(registerPriority);
1046                 if (debug.isLogEnabled()) {
1047                     printRegisterState();
1048                 }
1049 
1050                 reg = null;
1051                 ignore = interval.location() != null && isRegister(interval.location()) ? asRegister(interval.location()) : null;
1052 
1053                 for (Register availableReg : availableRegs) {
1054                     int number = availableReg.number;
1055                     if (availableReg.equals(ignore)) {
1056                         // this register must be ignored
1057                     } else if (usePos[number] > regNeededUntil) {
1058                         /*
1059                          * If the use position is the same, prefer registers (active intervals)
1060                          * where the value is already on the stack.
1061                          */
1062                         if (reg == null || (usePos[number] > usePos[reg.number]) || (usePos[number] == usePos[reg.number] && (!isInMemory.get(reg.number) && isInMemory.get(number)))) {
1063                             reg = availableReg;
1064                         }
1065                     }
1066                 }
1067 
1068                 if (debug.isLogEnabled()) {
1069                     debug.log("Register Selected: %s", reg);
1070                 }
1071 
1072                 int regUsePos = (reg == null ? 0 : usePos[reg.number]);
1073                 if (regUsePos <= firstShouldHaveUsage) {
1074                     /* Check if there is another interval that is already in memory. */
1075                     if (reg == null || interval.inMemoryAt(currentPosition) || !isInMemory.get(reg.number)) {
1076                         if (debug.isLogEnabled()) {
1077                             debug.log("able to spill current interval. firstUsage(register): %d, usePos: %d", firstUsage, regUsePos);
1078                         }
1079 
1080                         if (firstUsage <= interval.from() + 1) {
1081                             if (registerPriority.equals(RegisterPriority.LiveAtLoopEnd)) {
1082                                 /*
1083                                  * Tool of last resort: we cannot spill the current interval, so we
1084                                  * try to spill an active interval that has a usage but does not
1085                                  * require a register.
1086                                  */
1087                                 debug.log("retry with register priority must have register");
1088                                 continue;
1089                             }
1090                             String description = "cannot spill interval (" + interval + ") that is used in first instruction (possible reason: no register found) firstUsage=" + firstUsage +
1091                                             ", interval.from()=" + interval.from() + "; already used candidates: " + Arrays.toString(availableRegs);
1092                             /*
1093                              * assign a reasonable register and do a bailout in product mode to
1094                              * avoid errors
1095                              */
1096                             allocator.assignSpillSlot(interval);
1097                             if (debug.isDumpEnabled(DebugContext.INFO_LEVEL)) {
1098                                 dumpLIRAndIntervals(description);
1099                             }
1100                             throw new OutOfRegistersException("LinearScan: no register found", description);
1101                         }
1102 
1103                         splitAndSpillInterval(interval);
1104                         return;
1105                     }
1106                 }
1107                 // common case: break out of the loop
1108                 break;
1109             }
1110 
1111             boolean needSplit = blockPos[reg.number] <= intervalTo;
1112 
1113             int splitPos = blockPos[reg.number];
1114 
1115             if (debug.isLogEnabled()) {
1116                 debug.log("decided to use register %d", reg.number);
1117             }
1118             assert splitPos > 0 : "invalid splitPos";
1119             assert needSplit || splitPos > interval.from() : "splitting interval at from";
1120 
1121             interval.assignLocation(reg.asValue(allocator.getKind(interval)));
1122             if (needSplit) {
1123                 // register not available for the full interval, so split it
1124                 splitWhenPartialRegisterAvailable(interval, splitPos);
1125             }
1126 
1127             // perform splitting and spilling for all affected intervals
1128             splitAndSpillIntersectingIntervals(reg);
1129             return;
1130         }
1131     }
1132 
1133     private void dumpLIRAndIntervals(String description) {
1134         debug.dump(DebugContext.INFO_LEVEL, allocator.getLIR(), description);
1135         allocator.printIntervals(description);
1136     }
1137 
    @SuppressWarnings("try")
    private void printRegisterState() {
        try (Indent indent2 = debug.logAndIndent("state of registers:")) {
            for (Register reg : availableRegs) {
                int i = reg.number;
                try (Indent indent3 = debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, inMemory: %b, intervals: ", i, usePos[i], blockPos[i], isInMemory.get(i))) {
                    for (int j = 0; j < spillIntervals[i].size(); j++) {
                        debug.log("%s", spillIntervals[i].get(j));
                    }
                }
            }
        }
    }

    private boolean noAllocationPossible(TraceInterval interval) {
        if (allocator.callKillsRegisters()) {
            // fast calculation of intervals that can never get a register because the
            // next instruction is a call that blocks all registers
            // Note: this only works if a call kills all registers

            // check if this interval is the result of a split operation
            // (an interval got a register until this position)
            int pos = interval.from();
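            /*
             * Ids of LIR instructions are even, so an odd from-position means that the interval
             * starts between two instructions, i.e. it is the result of a split.
             */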
            if (isOdd(pos)) {
                // check whether the next instruction is a call that blocks all registers
                if (pos < allocator.maxOpId() && allocator.hasCall(pos + 1) && interval.to() > pos + 1) {
                    if (debug.isLogEnabled()) {
                        debug.log("free register cannot be available because all registers blocked by following call");
                    }

                    // safety check that there is really no register available
                    assert !allocFreeRegister(interval) : "found a register for this interval";
                    return true;
                }
            }
        }
        return false;
    }

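    /**
     * Caches the allocatable registers (and the minimum/maximum register numbers) for the platform
     * kind of {@code interval} before allocation starts.
     */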
    private void initVarsForAlloc(TraceInterval interval) {
        AllocatableRegisters allocatableRegisters = allocator.getRegisterAllocationConfig().getAllocatableRegisters(allocator.getKind(interval).getPlatformKind());
        availableRegs = allocatableRegisters.allocatableRegisters;
        minReg = allocatableRegisters.minRegisterNumber;
        maxReg = allocatableRegisters.maxRegisterNumber;
    }

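    /**
     * Returns {@code true} if {@code op} is a {@link ValueMoveOp} that copies the variable of
     * {@code from} into the variable of {@code to}.
     */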
    private static boolean isMove(LIRInstruction op, TraceInterval from, TraceInterval to) {
        if (ValueMoveOp.isValueMoveOp(op)) {
            ValueMoveOp move = ValueMoveOp.asValueMoveOp(op);
            if (isVariable(move.getInput()) && isVariable(move.getResult())) {
                return move.getInput() != null && LIRValueUtil.asVariable(move.getInput()).index == from.operandNumber && move.getResult() != null &&
                                LIRValueUtil.asVariable(move.getResult()).index == to.operandNumber;
            }
        }
        return false;
    }

    // optimization (especially for phi functions of nested loops):
    // assign same spill slot to non-intersecting intervals
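    //
    // The LIR pattern looked for is (illustrative only, variable names are made up):
    //   beginPos: MOVE v_hint -> v_cur   (the interval starts with a move from its register hint)
    //   ...
    //   endPos:   MOVE v_cur -> v_hint   (and ends with a move back into the hint)
    // If the hint is spilled around that range, cur can share the hint's spill slot and the
    // connecting moves become effectively redundant.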
    private void combineSpilledIntervals(TraceInterval interval) {
        if (interval.isSplitChild()) {
            // optimization is only suitable for split parents
            return;
        }

        IntervalHint locationHint = interval.locationHint(false);
        if (locationHint == null || !(locationHint instanceof TraceInterval)) {
            return;
        }
        TraceInterval registerHint = (TraceInterval) locationHint;
        assert registerHint.isSplitParent() : "register hint must be split parent";

        if (interval.spillState() != SpillState.NoOptimization || registerHint.spillState() != SpillState.NoOptimization) {
            // combining the stack slots for intervals where spill move optimization is applied
            // is not beneficial and would cause problems
            return;
        }

        int beginPos = interval.from();
        int endPos = interval.to();
        if (endPos > allocator.maxOpId() || isOdd(beginPos) || isOdd(endPos)) {
            // safety check that lirOpWithId is allowed
            return;
        }

        if (!isMove(allocator.instructionForId(beginPos), registerHint, interval) || !isMove(allocator.instructionForId(endPos), interval, registerHint)) {
            // cur and registerHint are not connected with two moves
            return;
        }

        TraceInterval beginHint = registerHint.getSplitChildAtOpId(beginPos, LIRInstruction.OperandMode.USE);
        TraceInterval endHint = registerHint.getSplitChildAtOpId(endPos, LIRInstruction.OperandMode.DEF);
        if (beginHint == endHint || beginHint.to() != beginPos || endHint.from() != endPos) {
            // registerHint must be split; otherwise the re-writing of use positions does not work
            return;
        }

        assert beginHint.location() != null : "must have register assigned";
        assert endHint.location() == null : "must not have register assigned";
        assert interval.firstUsage(RegisterPriority.MustHaveRegister) == beginPos : "must have use position at begin of interval because of move";
        assert endHint.firstUsage(RegisterPriority.MustHaveRegister) == endPos : "must have use position at end of interval because of move";

        if (isRegister(beginHint.location())) {
            // registerHint is not spilled at beginPos, so it would not be beneficial to
            // immediately spill cur
            return;
        }
        assert registerHint.spillSlot() != null : "must be set when part of interval was spilled";

        // modify intervals such that cur gets the same stack slot as registerHint
        // delete use positions to prevent the intervals from getting a register at the beginning
        interval.setSpillSlot(registerHint.spillSlot());
        interval.removeFirstUsePos();
        endHint.removeFirstUsePos();
    }

    // allocate a physical register or memory location to an interval
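    // Returns true if the interval received a register and has to be added to the active list;
    // returns false if it was spilled to a stack slot instead.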
    @SuppressWarnings("try")
    private boolean activateCurrent(TraceInterval interval) {
        if (debug.isLogEnabled()) {
            logCurrentStatus();
        }
        boolean result = true;

        try (Indent indent = debug.logAndIndent("activating interval %s,  splitParent: %d", interval, interval.splitParent().operandNumber)) {

            if (interval.location() != null && isStackSlotValue(interval.location())) {
                // activating an interval that has a stack slot assigned -> split it at the first
                // use position (used for method parameters)
                if (debug.isLogEnabled()) {
                    debug.log("interval has spill slot assigned (method parameter) . split it before first use");
                }
                splitStackInterval(interval);
                result = false;

            } else {
                if (interval.location() == null) {
                    // interval has no register assigned -> normal allocation
                    // (this is the normal case for most intervals)
                    if (debug.isLogEnabled()) {
                        debug.log("normal allocation of register");
                    }

                    // assign same spill slot to non-intersecting intervals
                    combineSpilledIntervals(interval);

                    initVarsForAlloc(interval);
                    if (noAllocationPossible(interval) || !allocFreeRegister(interval)) {
                        // no free register available:
                        // split and spill another interval so that this interval gets a register
                        allocLockedRegister(interval);
                    }

                    // spilled intervals need not be moved to the active list
                    if (!isRegister(interval.location())) {
                        result = false;
                    }
                }
            }

            // load spilled values that become active from stack slot to register
            if (interval.insertMoveWhenActivated()) {
                assert interval.isSplitChild();
                assert interval.currentSplitChild() != null;
                assert interval.currentSplitChild().operandNumber != interval.operandNumber : "cannot insert move between same interval";
                if (debug.isLogEnabled()) {
                    debug.log("Inserting move from interval %d to %d because insertMoveWhenActivated is set", interval.currentSplitChild().operandNumber, interval.operandNumber);
                }

                insertMove(interval.from(), interval.currentSplitChild(), interval);
            }
            interval.makeCurrentSplitChild();

        }

        return result; // true = interval is moved to active list
    }

    void finishAllocation() {
        // must be called when all intervals are allocated
        moveResolver.resolveAndAppendMoves();
    }

    @SuppressWarnings("try")
    private void logCurrentStatus() {
        try (Indent i = debug.logAndIndent("active:")) {
            logList(debug, activeFixedList);
            logList(debug, activeAnyList);
        }
        try (Indent i = debug.logAndIndent("inactive(fixed):")) {
            logList(debug, inactiveFixedList);
        }
    }

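    /** Walks to the end of the trace, i.e. processes all remaining unhandled intervals. */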
    void walk() {
        walkTo(Integer.MAX_VALUE);
    }

    private void removeFromList(TraceInterval interval) {
        activeAnyList = TraceLinearScanWalker.removeAny(activeAnyList, interval);
    }

    /**
     * Walks up to {@code from} and updates the state of {@link FixedInterval fixed intervals}.
     *
     * Fixed intervals can switch back and forth between the states {@link State#Active} and
     * {@link State#Inactive} (and eventually to {@link State#Handled} but handled intervals are not
     * managed).
     */
    @SuppressWarnings("try")
    private void walkToFixed(State state, int from) {
        assert state == State.Active || state == State.Inactive : "wrong state";
        FixedInterval prevprev = null;
        FixedInterval prev = (state == State.Active) ? activeFixedList : inactiveFixedList;
        FixedInterval next = prev;
        if (debug.isLogEnabled()) {
            try (Indent i = debug.logAndIndent("walkToFixed(%s, %d):", state, from)) {
                logList(debug, next);
            }
        }
        while (next.currentFrom() <= from) {
            FixedInterval cur = next;
            next = cur.next;

            boolean rangeHasChanged = false;
            while (cur.currentTo() <= from) {
                cur.nextRange();
                rangeHasChanged = true;
            }

            // also handle move from inactive list to active list
            rangeHasChanged = rangeHasChanged || (state == State.Inactive && cur.currentFrom() <= from);

            if (rangeHasChanged) {
                // remove cur from list
                if (prevprev == null) {
                    if (state == State.Active) {
                        activeFixedList = next;
                    } else {
                        inactiveFixedList = next;
                    }
                } else {
                    prevprev.next = next;
                }
                prev = next;
                TraceInterval.State newState;
                if (cur.currentAtEnd()) {
                    // move to handled state (not maintained as a list)
                    newState = State.Handled;
                } else {
                    if (cur.currentFrom() <= from) {
                        // sort into active list
                        activeFixedList = TraceLinearScanWalker.addToListSortedByCurrentFromPositions(activeFixedList, cur);
                        newState = State.Active;
                    } else {
                        // sort into inactive list
                        inactiveFixedList = TraceLinearScanWalker.addToListSortedByCurrentFromPositions(inactiveFixedList, cur);
                        newState = State.Inactive;
                    }
                    if (prev == cur) {
                        assert state == newState;
                        prevprev = prev;
                        prev = cur.next;
                    }
                }
                intervalMoved(debug, cur, state, newState);
            } else {
                prevprev = prev;
                prev = cur.next;
            }
        }
    }

    /**
     * Walks up to {@code from} and updates the state of {@link TraceInterval intervals}.
     *
     * Trace intervals can switch once from {@link State#Unhandled} to {@link State#Active} and then
     * to {@link State#Handled} but handled intervals are not managed.
     */
    @SuppressWarnings("try")
    private void walkToAny(int from) {
        TraceInterval prevprev = null;
        TraceInterval prev = activeAnyList;
        TraceInterval next = prev;
        if (debug.isLogEnabled()) {
            try (Indent i = debug.logAndIndent("walkToAny(%d):", from)) {
                logList(debug, next);
            }
        }
        while (next.from() <= from) {
            TraceInterval cur = next;
            next = cur.next;

            if (cur.to() <= from) {
                // remove cur from list
                if (prevprev == null) {
                    activeAnyList = next;
                } else {
                    prevprev.next = next;
                }
                intervalMoved(debug, cur, State.Active, State.Handled);
            } else {
                prevprev = prev;
            }
            prev = next;
        }
    }

    /**
     * Get the next interval from {@linkplain #unhandledAnyList} which starts before or at
     * {@code toOpId}. The returned interval is removed.
     *
     * @postcondition all intervals in {@linkplain #unhandledAnyList} start after {@code toOpId}.
     *
     * @return The next interval or null if there is no {@linkplain #unhandledAnyList unhandled}
     *         interval at position {@code toOpId}.
     */
    private TraceInterval nextInterval(int toOpId) {
        TraceInterval any = unhandledAnyList;

        if (any != TraceInterval.EndMarker) {
            TraceInterval currentInterval = unhandledAnyList;
            if (toOpId < currentInterval.from()) {
                return null;
            }

            unhandledAnyList = currentInterval.next;
            currentInterval.next = TraceInterval.EndMarker;
            return currentInterval;
        }
        return null;

    }

    /**
     * Walk up to {@code toOpId}.
     *
     * @postcondition {@link #currentPosition} is set to {@code toOpId}, {@link #activeFixedList}
     *                and {@link #inactiveFixedList} are populated.
     */
    @SuppressWarnings("try")
    private void walkTo(int toOpId) {
        assert currentPosition <= toOpId : "cannot walk backwards";
        for (TraceInterval currentInterval = nextInterval(toOpId); currentInterval != null; currentInterval = nextInterval(toOpId)) {
            int opId = currentInterval.from();

            // set currentPosition prior to call of walkTo
            currentPosition = opId;

            // update unhandled stack intervals
            // updateUnhandledStackIntervals(opId);

            // call walkTo even if currentPosition == id
            walkToFixed(State.Active, opId);
            walkToFixed(State.Inactive, opId);
            walkToAny(opId);

            try (Indent indent = debug.logAndIndent("walk to op %d", opId)) {
                if (activateCurrent(currentInterval)) {
                    activeAnyList = TraceLinearScanWalker.addToListSortedByFromPositions(activeAnyList, currentInterval);
                    intervalMoved(debug, currentInterval, State.Unhandled, State.Active);
                }
            }
        }
        // set currentPosition prior to call of walkTo
        currentPosition = toOpId;

        if (currentPosition <= allocator.maxOpId()) {
            // update unhandled stack intervals
            // updateUnhandledStackIntervals(toOpId);

            // call walkTo if still in range
            walkToFixed(State.Active, toOpId);
            walkToFixed(State.Inactive, toOpId);
            walkToAny(toOpId);
        }
    }

    private static void logList(DebugContext debug, FixedInterval i) {
        for (FixedInterval interval = i; interval != FixedInterval.EndMarker; interval = interval.next) {
            debug.log("%s", interval.logString());
        }
    }

    private static void logList(DebugContext debug, TraceInterval i) {
        for (TraceInterval interval = i; interval != TraceInterval.EndMarker; interval = interval.next) {
            debug.log("%s", interval.logString());
        }
    }

    private static void intervalMoved(DebugContext debug, IntervalHint interval, State from, State to) {
        // intervalMoved() is called whenever an interval moves from one interval list to another.
        // In the implementation of this method it is prohibited to move the interval to any list.
        if (debug.isLogEnabled()) {
            debug.log("interval moved from %s to %s: %s", from, to, interval.logString());
        }
    }
}