Mercurial > hg > truffle
comparison graal/Compiler/src/com/sun/c1x/alloc/LinearScanWalker.java @ 2507:9ec15d6914ca
Pull over of compiler from maxine repository.
author | Thomas Wuerthinger <thomas@wuerthinger.net> |
---|---|
date | Wed, 27 Apr 2011 11:43:22 +0200 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
2506:4a3bf8a5bf41 | 2507:9ec15d6914ca |
---|---|
1 /* | |
2 * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |
20 * or visit www.oracle.com if you need additional information or have any | |
21 * questions. | |
22 */ | |
23 package com.sun.c1x.alloc; | |
24 | |
25 import static com.sun.cri.ci.CiUtil.*; | |
26 | |
27 import java.util.*; | |
28 | |
29 import com.sun.c1x.*; | |
30 import com.sun.c1x.alloc.Interval.RegisterBinding; | |
31 import com.sun.c1x.alloc.Interval.RegisterPriority; | |
32 import com.sun.c1x.alloc.Interval.SpillState; | |
33 import com.sun.c1x.alloc.Interval.State; | |
34 import com.sun.c1x.debug.*; | |
35 import com.sun.c1x.ir.*; | |
36 import com.sun.c1x.lir.*; | |
37 import com.sun.c1x.util.*; | |
38 import com.sun.cri.ci.*; | |
39 import com.sun.cri.ci.CiRegister.RegisterFlag; | |
40 | |
41 /** | |
42 * | |
43 * @author Thomas Wuerthinger | |
44 */ | |
45 final class LinearScanWalker extends IntervalWalker { | |
46 | |
// Registers eligible for allocation of the current interval
// (initialization happens outside this chunk -- TODO confirm where it is set).
private CiRegister[] availableRegs;

// Indexed by register number. usePos holds the start of the next interval
// that has the register assigned (see comment in allocFreeRegister).
// blockPos appears to hold the position where a fixed interval blocks the
// register -- NOTE(review): inferred from setBlockPos/spillBlock* callers, confirm.
private final int[] usePos;
private final int[] blockPos;

// Indexed by register number: intervals occupying the register that are
// collected as spill candidates by setUsePos(..., onlyProcessUsePos=false).
private List<Interval>[] spillIntervals;

private MoveResolver moveResolver; // for ordering spill moves
56 // accessors mapped to same functions in class LinearScan | |
57 int blockCount() { | |
58 return allocator.blockCount(); | |
59 } | |
60 | |
61 BlockBegin blockAt(int idx) { | |
62 return allocator.blockAt(idx); | |
63 } | |
64 | |
65 BlockBegin blockOfOpWithId(int opId) { | |
66 return allocator.blockForId(opId); | |
67 } | |
68 | |
69 LinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { | |
70 super(allocator, unhandledFixedFirst, unhandledAnyFirst); | |
71 moveResolver = new MoveResolver(allocator); | |
72 spillIntervals = Util.uncheckedCast(new List[allocator.registers.length]); | |
73 for (int i = 0; i < allocator.registers.length; i++) { | |
74 spillIntervals[i] = new ArrayList<Interval>(2); | |
75 } | |
76 usePos = new int[allocator.registers.length]; | |
77 blockPos = new int[allocator.registers.length]; | |
78 } | |
79 | |
80 void initUseLists(boolean onlyProcessUsePos) { | |
81 for (CiRegister register : availableRegs) { | |
82 int i = register.number; | |
83 usePos[i] = Integer.MAX_VALUE; | |
84 | |
85 if (!onlyProcessUsePos) { | |
86 blockPos[i] = Integer.MAX_VALUE; | |
87 spillIntervals[i].clear(); | |
88 } | |
89 } | |
90 } | |
91 | |
92 void excludeFromUse(Interval i) { | |
93 CiValue location = i.location(); | |
94 int i1 = location.asRegister().number; | |
95 if (i1 >= availableRegs[0].number && i1 <= availableRegs[availableRegs.length - 1].number) { | |
96 usePos[i1] = 0; | |
97 } | |
98 } | |
99 | |
100 void setUsePos(Interval interval, int usePos, boolean onlyProcessUsePos) { | |
101 if (usePos != -1) { | |
102 assert usePos != 0 : "must use excludeFromUse to set usePos to 0"; | |
103 int i = interval.location().asRegister().number; | |
104 if (i >= availableRegs[0].number && i <= availableRegs[availableRegs.length - 1].number) { | |
105 if (this.usePos[i] > usePos) { | |
106 this.usePos[i] = usePos; | |
107 } | |
108 if (!onlyProcessUsePos) { | |
109 spillIntervals[i].add(interval); | |
110 } | |
111 } | |
112 } | |
113 } | |
114 | |
115 void setBlockPos(Interval i, int blockPos) { | |
116 if (blockPos != -1) { | |
117 int reg = i.location().asRegister().number; | |
118 if (reg >= availableRegs[0].number && reg <= availableRegs[availableRegs.length - 1].number) { | |
119 if (this.blockPos[reg] > blockPos) { | |
120 this.blockPos[reg] = blockPos; | |
121 } | |
122 if (usePos[reg] > blockPos) { | |
123 usePos[reg] = blockPos; | |
124 } | |
125 } | |
126 } | |
127 } | |
128 | |
129 void freeExcludeActiveFixed() { | |
130 Interval interval = activeLists.get(RegisterBinding.Fixed); | |
131 while (interval != Interval.EndMarker) { | |
132 assert interval.location().isRegister() : "active interval must have a register assigned"; | |
133 excludeFromUse(interval); | |
134 interval = interval.next; | |
135 } | |
136 } | |
137 | |
138 void freeExcludeActiveAny() { | |
139 Interval interval = activeLists.get(RegisterBinding.Any); | |
140 while (interval != Interval.EndMarker) { | |
141 assert interval.location().isRegister() : "active interval must have a register assigned"; | |
142 excludeFromUse(interval); | |
143 interval = interval.next; | |
144 } | |
145 } | |
146 | |
147 void freeCollectInactiveFixed(Interval current) { | |
148 Interval interval = inactiveLists.get(RegisterBinding.Fixed); | |
149 while (interval != Interval.EndMarker) { | |
150 if (current.to() <= interval.currentFrom()) { | |
151 assert interval.currentIntersectsAt(current) == -1 : "must not intersect"; | |
152 setUsePos(interval, interval.currentFrom(), true); | |
153 } else { | |
154 setUsePos(interval, interval.currentIntersectsAt(current), true); | |
155 } | |
156 interval = interval.next; | |
157 } | |
158 } | |
159 | |
160 void freeCollectInactiveAny(Interval current) { | |
161 Interval interval = inactiveLists.get(RegisterBinding.Any); | |
162 while (interval != Interval.EndMarker) { | |
163 setUsePos(interval, interval.currentIntersectsAt(current), true); | |
164 interval = interval.next; | |
165 } | |
166 } | |
167 | |
168 void freeCollectUnhandled(RegisterBinding kind, Interval current) { | |
169 Interval interval = unhandledLists.get(kind); | |
170 while (interval != Interval.EndMarker) { | |
171 setUsePos(interval, interval.intersectsAt(current), true); | |
172 if (kind == RegisterBinding.Fixed && current.to() <= interval.from()) { | |
173 setUsePos(interval, interval.from(), true); | |
174 } | |
175 interval = interval.next; | |
176 } | |
177 } | |
178 | |
179 void spillExcludeActiveFixed() { | |
180 Interval interval = activeLists.get(RegisterBinding.Fixed); | |
181 while (interval != Interval.EndMarker) { | |
182 excludeFromUse(interval); | |
183 interval = interval.next; | |
184 } | |
185 } | |
186 | |
187 void spillBlockUnhandledFixed(Interval current) { | |
188 Interval interval = unhandledLists.get(RegisterBinding.Fixed); | |
189 while (interval != Interval.EndMarker) { | |
190 setBlockPos(interval, interval.intersectsAt(current)); | |
191 interval = interval.next; | |
192 } | |
193 } | |
194 | |
195 void spillBlockInactiveFixed(Interval current) { | |
196 Interval interval = inactiveLists.get(RegisterBinding.Fixed); | |
197 while (interval != Interval.EndMarker) { | |
198 if (current.to() > interval.currentFrom()) { | |
199 setBlockPos(interval, interval.currentIntersectsAt(current)); | |
200 } else { | |
201 assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect"; | |
202 } | |
203 | |
204 interval = interval.next; | |
205 } | |
206 } | |
207 | |
208 void spillCollectActiveAny() { | |
209 Interval interval = activeLists.get(RegisterBinding.Any); | |
210 while (interval != Interval.EndMarker) { | |
211 setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); | |
212 interval = interval.next; | |
213 } | |
214 } | |
215 | |
216 void spillCollectInactiveAny(Interval current) { | |
217 Interval interval = inactiveLists.get(RegisterBinding.Any); | |
218 while (interval != Interval.EndMarker) { | |
219 if (interval.currentIntersects(current)) { | |
220 setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); | |
221 } | |
222 interval = interval.next; | |
223 } | |
224 } | |
225 | |
/**
 * Registers a spill/reload move from {@code srcIt} to {@code dstIt} with the
 * move resolver, positioned just before the instruction with the given opId.
 * All moves are output here; when source and target are equal, the move is
 * optimized away later in assignRegNums.
 */
void insertMove(int opId, Interval srcIt, Interval dstIt) {
    // output all moves here. When source and target are equal, the move is
    // optimized away later in assignRegNums

    // round an odd opId up to the following even opId (instruction boundary)
    opId = (opId + 1) & ~1;
    BlockBegin opBlock = allocator.blockForId(opId);
    assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary";

    // calculate index of instruction inside instruction list of current block
    // the minimal index (for a block with no spill moves) can be calculated because the
    // numbering of instructions is known.
    // When the block already contains spill moves, the index must be increased until the
    // correct index is reached.
    List<LIRInstruction> list = opBlock.lir().instructionsList();
    // ids advance by 2 per original instruction, hence the shift by 1
    int index = (opId - list.get(0).id) >> 1;
    assert list.get(index).id <= opId : "error in calculation";

    // skip over spill moves already inserted before the target instruction
    while (list.get(index).id != opId) {
        index++;
        assert 0 <= index && index < list.size() : "index out of bounds";
    }
    assert 1 <= index && index < list.size() : "index out of bounds";
    assert list.get(index).id == opId : "error in calculation";

    // insert new instruction before instruction at position index
    moveResolver.moveInsertPosition(opBlock.lir(), index - 1);
    moveResolver.addMapping(srcIt, dstIt);
}
254 | |
/**
 * Finds a split position on a block boundary between {@code minBlock} and
 * {@code maxBlock}, preferring the end of the block with the lowest loop
 * depth found while scanning backwards from {@code maxBlock}.
 *
 * @param minBlock    block containing the earliest allowed split position
 * @param maxBlock    block containing the latest allowed split position
 * @param maxSplitPos latest opId at which the split may happen
 * @return an opId located on a block boundary (or beyond maxOpId)
 */
int findOptimalSplitPos(BlockBegin minBlock, BlockBegin maxBlock, int maxSplitPos) {
    int fromBlockNr = minBlock.linearScanNumber();
    int toBlockNr = maxBlock.linearScanNumber();

    assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range";
    assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range";
    assert fromBlockNr < toBlockNr : "must cross block boundary";

    // Try to split at end of maxBlock. If this would be after
    // maxSplitPos, then use the begin of maxBlock
    int optimalSplitPos = maxBlock.lastLirInstructionId() + 2;
    if (optimalSplitPos > maxSplitPos) {
        optimalSplitPos = maxBlock.firstLirInstructionId();
    }

    // scan backwards: any block with a strictly lower loop depth yields a
    // better boundary; split at the end of the shallowest such block
    int minLoopDepth = maxBlock.loopDepth();
    for (int i = toBlockNr - 1; i >= fromBlockNr; i--) {
        BlockBegin cur = blockAt(i);

        if (cur.loopDepth() < minLoopDepth) {
            // block with lower loop-depth found -> split at the end of this block
            minLoopDepth = cur.loopDepth();
            optimalSplitPos = cur.lastLirInstructionId() + 2;
        }
    }
    assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) : "algorithm must move split pos to block boundary";

    return optimalSplitPos;
}
284 | |
/**
 * Chooses the optimal split position for {@code interval} inside the range
 * [minSplitPos, maxSplitPos]: trivial when the range is a single position,
 * otherwise moved to a block boundary when possible, with an optional loop
 * optimization that splits before a loop end found inside the range.
 *
 * @param doLoopOptimization when true, try to move the split before an
 *        enclosed loop so the interval is reloaded inside the loop
 * @return the chosen split opId (always within [minSplitPos, maxSplitPos])
 */
int findOptimalSplitPos(Interval interval, int minSplitPos, int maxSplitPos, boolean doLoopOptimization) {
    int optimalSplitPos = -1;
    if (minSplitPos == maxSplitPos) {
        // trivial case, no optimization of split position possible
        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println(" min-pos and max-pos are equal, no optimization possible");
        }
        optimalSplitPos = minSplitPos;

    } else {
        assert minSplitPos < maxSplitPos : "must be true then";
        assert minSplitPos > 0 : "cannot access minSplitPos - 1 otherwise";

        // reason for using minSplitPos - 1: when the minimal split pos is exactly at the
        // beginning of a block, then minSplitPos is also a possible split position.
        // Use the block before as minBlock, because then minBlock.lastLirInstructionId() + 2 == minSplitPos
        BlockBegin minBlock = allocator.blockForId(minSplitPos - 1);

        // reason for using maxSplitPos - 1: otherwise there would be an assert on failure
        // when an interval ends at the end of the last block of the method
        // (in this case, maxSplitPos == allocator().maxLirOpId() + 2, and there is no
        // block at this opId)
        BlockBegin maxBlock = allocator.blockForId(maxSplitPos - 1);

        assert minBlock.linearScanNumber() <= maxBlock.linearScanNumber() : "invalid order";
        if (minBlock == maxBlock) {
            // split position cannot be moved to block boundary -> so split as late as possible
            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println(" cannot move split pos to block boundary because minPos and maxPos are in same block");
            }
            optimalSplitPos = maxSplitPos;

        } else {
            if (interval.hasHoleBetween(maxSplitPos - 1, maxSplitPos) && !allocator.isBlockBegin(maxSplitPos)) {
                // Do not move split position if the interval has a hole before maxSplitPos.
                // Intervals resulting from Phi-Functions have more than one definition (marked
                // as mustHaveRegister) with a hole before each definition. When the register is needed
                // for the second definition -> an earlier reloading is unnecessary.
                if (C1XOptions.TraceLinearScanLevel >= 4) {
                    TTY.println(" interval has hole just before maxSplitPos, so splitting at maxSplitPos");
                }
                optimalSplitPos = maxSplitPos;

            } else {
                // search optimal block boundary between minSplitPos and maxSplitPos
                if (C1XOptions.TraceLinearScanLevel >= 4) {
                    TTY.println(" moving split pos to optimal block boundary between block B%d and B%d", minBlock.blockID, maxBlock.blockID);
                }

                if (doLoopOptimization) {
                    // Loop optimization: if a loop-end marker is found between min- and max-position ->
                    // then split before this loop
                    int loopEndPos = interval.nextUsageExact(RegisterPriority.LiveAtLoopEnd, minBlock.lastLirInstructionId() + 2);
                    if (C1XOptions.TraceLinearScanLevel >= 4) {
                        TTY.println(" loop optimization: loop end found at pos %d", loopEndPos);
                    }

                    assert loopEndPos > minSplitPos : "invalid order";
                    if (loopEndPos < maxSplitPos) {
                        // loop-end marker found between min- and max-position
                        // if it is not the end marker for the same loop as the min-position -> then move
                        // the max-position to this loop block.
                        // Desired result: uses tagged as shouldHaveRegister inside a loop cause a reloading
                        // of the interval (normally, only mustHaveRegister causes a reloading)
                        BlockBegin loopBlock = allocator.blockForId(loopEndPos);

                        if (C1XOptions.TraceLinearScanLevel >= 4) {
                            TTY.println(" interval is used in loop that ends in block B%d, so trying to move maxBlock back from B%d to B%d", loopBlock.blockID, maxBlock.blockID, loopBlock.blockID);
                        }
                        assert loopBlock != minBlock : "loopBlock and minBlock must be different because block boundary is needed between";

                        optimalSplitPos = findOptimalSplitPos(minBlock, loopBlock, loopBlock.lastLirInstructionId() + 2);
                        // when the best boundary is the loop block's own end, the loop
                        // optimization gained nothing -> fall through to the plain search
                        if (optimalSplitPos == loopBlock.lastLirInstructionId() + 2) {
                            optimalSplitPos = -1;
                            if (C1XOptions.TraceLinearScanLevel >= 4) {
                                TTY.println(" loop optimization not necessary");
                            }
                        } else {
                            if (C1XOptions.TraceLinearScanLevel >= 4) {
                                TTY.println(" loop optimization successful");
                            }
                        }
                    }
                }

                if (optimalSplitPos == -1) {
                    // not calculated by loop optimization
                    optimalSplitPos = findOptimalSplitPos(minBlock, maxBlock, maxSplitPos);
                }
            }
        }
    }
    if (C1XOptions.TraceLinearScanLevel >= 4) {
        TTY.println(" optimal split position: %d", optimalSplitPos);
    }

    return optimalSplitPos;
}
383 | |
/**
 * Splits {@code interval} at the optimal position between minSplitPos and
 * maxSplitPos in two parts:
 * 1) the left part has already a location assigned
 * 2) the right part is sorted into the unhandled-list and gets a new chance
 *    at a register when it is activated
 */
void splitBeforeUsage(Interval interval, int minSplitPos, int maxSplitPos) {
    if (C1XOptions.TraceLinearScanLevel >= 2) {
        TTY.println("----- splitting interval: ");
    }
    if (C1XOptions.TraceLinearScanLevel >= 4) {
        TTY.println(interval.logString(allocator));
    }
    if (C1XOptions.TraceLinearScanLevel >= 2) {
        TTY.println(" between %d and %d", minSplitPos, maxSplitPos);
    }

    assert interval.from() < minSplitPos : "cannot split at start of interval";
    assert currentPosition < minSplitPos : "cannot split before current position";
    assert minSplitPos <= maxSplitPos : "invalid order";
    assert maxSplitPos <= interval.to() : "cannot split after end of interval";

    int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true);

    assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range";
    assert optimalSplitPos <= interval.to() : "cannot split after end of interval";
    assert optimalSplitPos > interval.from() : "cannot split at start of interval";

    if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) {
        // the split position would be just before the end of the interval
        // -> no split at all necessary
        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println(" no split necessary because optimal split position is at end of interval");
        }
        return;
    }

    // must calculate this before the actual split is performed and before split position is moved to odd opId
    boolean moveNecessary = !allocator.isBlockBegin(optimalSplitPos) && !interval.hasHoleBetween(optimalSplitPos - 1, optimalSplitPos);

    if (!allocator.isBlockBegin(optimalSplitPos)) {
        // move position before actual instruction (odd opId)
        optimalSplitPos = (optimalSplitPos - 1) | 1;
    }

    if (C1XOptions.TraceLinearScanLevel >= 4) {
        TTY.println(" splitting at position %d", optimalSplitPos);
    }
    assert allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 1) : "split pos must be odd when not on block boundary";
    assert !allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 0) : "split pos must be even on block boundary";

    Interval splitPart = interval.split(optimalSplitPos, allocator);

    allocator.copyRegisterFlags(interval, splitPart);
    splitPart.setInsertMoveWhenActivated(moveNecessary);

    assert splitPart.from() >= current.currentFrom() : "cannot append new interval before current walk position";
    unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart);

    if (C1XOptions.TraceLinearScanLevel >= 2) {
        TTY.println(" split interval in two parts (insertMoveWhenActivated: %b)", moveNecessary);
    }
    if (C1XOptions.TraceLinearScanLevel >= 2) {
        TTY.print(" ");
        TTY.println(interval.logString(allocator));
        TTY.print(" ");
        TTY.println(splitPart.logString(allocator));
    }
}
451 | |
452 // split an interval at the optimal position between minSplitPos and | |
453 // maxSplitPos in two parts: | |
454 // 1) the left part has already a location assigned | |
455 // 2) the right part is always on the stack and therefore ignored in further processing | |
456 | |
457 void splitForSpilling(Interval interval) { | |
458 // calculate allowed range of splitting position | |
459 int maxSplitPos = currentPosition; | |
460 int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos) + 1, interval.from()); | |
461 | |
462 if (C1XOptions.TraceLinearScanLevel >= 2) { | |
463 TTY.print("----- splitting and spilling interval: "); | |
464 TTY.println(interval.logString(allocator)); | |
465 TTY.println(" between %d and %d", minSplitPos, maxSplitPos); | |
466 } | |
467 | |
468 assert interval.state == State.Active : "why spill interval that is not active?"; | |
469 assert interval.from() <= minSplitPos : "cannot split before start of interval"; | |
470 assert minSplitPos <= maxSplitPos : "invalid order"; | |
471 assert maxSplitPos < interval.to() : "cannot split at end end of interval"; | |
472 assert currentPosition < interval.to() : "interval must not end before current position"; | |
473 | |
474 if (minSplitPos == interval.from()) { | |
475 // the whole interval is never used, so spill it entirely to memory | |
476 if (C1XOptions.TraceLinearScanLevel >= 2) { | |
477 TTY.println(" spilling entire interval because split pos is at beginning of interval"); | |
478 TTY.println(" use positions: " + interval.usePosList().size()); | |
479 } | |
480 assert interval.firstUsage(RegisterPriority.ShouldHaveRegister) > currentPosition : "interval must not have use position before currentPosition"; | |
481 | |
482 allocator.assignSpillSlot(interval); | |
483 allocator.changeSpillState(interval, minSplitPos); | |
484 | |
485 // Also kick parent intervals out of register to memory when they have no use | |
486 // position. This avoids short interval in register surrounded by intervals in | |
487 // memory . avoid useless moves from memory to register and back | |
488 Interval parent = interval; | |
489 while (parent != null && parent.isSplitChild()) { | |
490 parent = parent.getSplitChildBeforeOpId(parent.from()); | |
491 | |
492 if (parent.location().isRegister()) { | |
493 if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) { | |
494 // parent is never used, so kick it out of its assigned register | |
495 if (C1XOptions.TraceLinearScanLevel >= 4) { | |
496 TTY.println(" kicking out interval %d out of its register because it is never used", parent.operandNumber); | |
497 } | |
498 allocator.assignSpillSlot(parent); | |
499 } else { | |
500 // do not go further back because the register is actually used by the interval | |
501 parent = null; | |
502 } | |
503 } | |
504 } | |
505 | |
506 } else { | |
507 // search optimal split pos, split interval and spill only the right hand part | |
508 int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, false); | |
509 | |
510 assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; | |
511 assert optimalSplitPos < interval.to() : "cannot split at end of interval"; | |
512 assert optimalSplitPos >= interval.from() : "cannot split before start of interval"; | |
513 | |
514 if (!allocator.isBlockBegin(optimalSplitPos)) { | |
515 // move position before actual instruction (odd opId) | |
516 optimalSplitPos = (optimalSplitPos - 1) | 1; | |
517 } | |
518 | |
519 if (C1XOptions.TraceLinearScanLevel >= 4) { | |
520 TTY.println(" splitting at position %d", optimalSplitPos); | |
521 } | |
522 assert allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 1) : "split pos must be odd when not on block boundary"; | |
523 assert !allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 0) : "split pos must be even on block boundary"; | |
524 | |
525 Interval spilledPart = interval.split(optimalSplitPos, allocator); | |
526 allocator.assignSpillSlot(spilledPart); | |
527 allocator.changeSpillState(spilledPart, optimalSplitPos); | |
528 | |
529 if (!allocator.isBlockBegin(optimalSplitPos)) { | |
530 if (C1XOptions.TraceLinearScanLevel >= 4) { | |
531 TTY.println(" inserting move from interval %d to %d", interval.operandNumber, spilledPart.operandNumber); | |
532 } | |
533 insertMove(optimalSplitPos, interval, spilledPart); | |
534 } | |
535 | |
536 // the currentSplitChild is needed later when moves are inserted for reloading | |
537 assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild"; | |
538 spilledPart.makeCurrentSplitChild(); | |
539 | |
540 if (C1XOptions.TraceLinearScanLevel >= 2) { | |
541 TTY.println(" split interval in two parts"); | |
542 TTY.print(" "); | |
543 TTY.println(interval.logString(allocator)); | |
544 TTY.print(" "); | |
545 TTY.println(spilledPart.logString(allocator)); | |
546 } | |
547 } | |
548 } | |
549 | |
550 void splitStackInterval(Interval interval) { | |
551 int minSplitPos = currentPosition + 1; | |
552 int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to()); | |
553 | |
554 splitBeforeUsage(interval, minSplitPos, maxSplitPos); | |
555 } | |
556 | |
557 void splitWhenPartialRegisterAvailable(Interval interval, int registerAvailableUntil) { | |
558 int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1); | |
559 splitBeforeUsage(interval, minSplitPos, registerAvailableUntil); | |
560 } | |
561 | |
562 void splitAndSpillInterval(Interval interval) { | |
563 assert interval.state == State.Active || interval.state == State.Inactive : "other states not allowed"; | |
564 | |
565 int currentPos = currentPosition; | |
566 if (interval.state == State.Inactive) { | |
567 // the interval is currently inactive, so no spill slot is needed for now. | |
568 // when the split part is activated, the interval has a new chance to get a register, | |
569 // so in the best case no stack slot is necessary | |
570 assert interval.hasHoleBetween(currentPos - 1, currentPos + 1) : "interval can not be inactive otherwise"; | |
571 splitBeforeUsage(interval, currentPos + 1, currentPos + 1); | |
572 | |
573 } else { | |
574 // search the position where the interval must have a register and split | |
575 // at the optimal position before. | |
576 // The new created part is added to the unhandled list and will get a register | |
577 // when it is activated | |
578 int minSplitPos = currentPos + 1; | |
579 int maxSplitPos = Math.min(interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos), interval.to()); | |
580 | |
581 splitBeforeUsage(interval, minSplitPos, maxSplitPos); | |
582 | |
583 assert interval.nextUsage(RegisterPriority.MustHaveRegister, currentPos) == Integer.MAX_VALUE : "the remaining part is spilled to stack and therefore has no register"; | |
584 splitForSpilling(interval); | |
585 } | |
586 } | |
587 | |
/**
 * Tries to assign a currently free register to {@code interval}. Prefers a
 * register free for the whole interval (minimum usePos among those, with
 * the hint register winning outright); otherwise takes the register free
 * for the longest prefix and splits the interval where it becomes occupied.
 *
 * @return true when a register was assigned (possibly after splitting),
 *         false when no register is free long enough
 */
boolean allocFreeRegister(Interval interval) {
    if (C1XOptions.TraceLinearScanLevel >= 2) {
        TTY.println("trying to find free register for " + interval.logString(allocator));
    }

    initUseLists(true);
    freeExcludeActiveFixed();
    freeExcludeActiveAny();
    freeCollectInactiveFixed(interval);
    freeCollectInactiveAny(interval);
    // freeCollectUnhandled(fixedKind, cur);
    assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0";

    // usePos contains the start of the next interval that has this register assigned
    // (either as a fixed register or a normal allocated register in the past)
    // only intervals overlapping with cur are processed, non-overlapping intervals can be ignored safely
    if (C1XOptions.TraceLinearScanLevel >= 4) {
        TTY.println(" state of registers:");
        for (CiRegister register : availableRegs) {
            int i = register.number;
            TTY.println(" reg %d: usePos: %d", register.number, usePos[i]);
        }
    }

    // register hint: try to reuse the location of a related interval
    CiRegister hint = null;
    Interval locationHint = interval.locationHint(true, allocator);
    if (locationHint != null && locationHint.location() != null && locationHint.location().isRegister()) {
        hint = locationHint.location().asRegister();
        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println(" hint register %d from interval %s", hint.number, locationHint.logString(allocator));
        }
    }
    assert interval.location() == null : "register already assigned to interval";

    // the register must be free at least until this position
    int regNeededUntil = interval.from() + 1;
    int intervalTo = interval.to();

    boolean needSplit = false;
    int splitPos = -1;

    CiRegister reg = null;
    CiRegister minFullReg = null;      // best register free for the whole interval
    CiRegister maxPartialReg = null;   // register free for the longest prefix

    for (int i = 0; i < availableRegs.length; ++i) {
        CiRegister availableReg = availableRegs[i];
        int number = availableReg.number;
        if (usePos[number] >= intervalTo) {
            // this register is free for the full interval
            if (minFullReg == null || availableReg == hint || (usePos[number] < usePos[minFullReg.number] && minFullReg != hint)) {
                minFullReg = availableReg;
            }
        } else if (usePos[number] > regNeededUntil) {
            // this register is at least free until regNeededUntil
            if (maxPartialReg == null || availableReg == hint || (usePos[number] > usePos[maxPartialReg.number] && maxPartialReg != hint)) {
                maxPartialReg = availableReg;
            }
        }
    }

    if (minFullReg != null) {
        reg = minFullReg;
    } else if (maxPartialReg != null) {
        needSplit = true;
        reg = maxPartialReg;
    } else {
        // no register free even until regNeededUntil -> allocation failed
        return false;
    }

    splitPos = usePos[reg.number];
    interval.assignLocation(reg.asValue(interval.kind()));
    if (C1XOptions.TraceLinearScanLevel >= 2) {
        TTY.println("selected register %d", reg.number);
    }

    assert splitPos > 0 : "invalid splitPos";
    if (needSplit) {
        // register not available for full interval, so split it
        splitWhenPartialRegisterAvailable(interval, splitPos);
    }

    // only return true if interval is completely assigned
    return true;
}
673 | |
674 CiRegister findLockedRegister(int regNeededUntil, int intervalTo, CiValue ignoreReg, boolean[] needSplit) { | |
675 int maxReg = -1; | |
676 CiRegister ignore = ignoreReg.isRegister() ? ignoreReg.asRegister() : null; | |
677 | |
678 for (CiRegister reg : availableRegs) { | |
679 int i = reg.number; | |
680 if (reg == ignore) { | |
681 // this register must be ignored | |
682 | |
683 } else if (usePos[i] > regNeededUntil) { | |
684 if (maxReg == -1 || (usePos[i] > usePos[maxReg])) { | |
685 maxReg = i; | |
686 } | |
687 } | |
688 } | |
689 | |
690 if (maxReg != -1) { | |
691 if (blockPos[maxReg] <= intervalTo) { | |
692 needSplit[0] = true; | |
693 } | |
694 return availableRegs[maxReg]; | |
695 } | |
696 | |
697 return null; | |
698 } | |
699 | |
700 void splitAndSpillIntersectingIntervals(CiRegister reg) { | |
701 assert reg != null : "no register assigned"; | |
702 | |
703 for (int i = 0; i < spillIntervals[reg.number].size(); i++) { | |
704 Interval interval = spillIntervals[reg.number].get(i); | |
705 removeFromList(interval); | |
706 splitAndSpillInterval(interval); | |
707 } | |
708 } | |
709 | |
    // Split an Interval and spill it to memory so that cur can be placed in a register
    //
    // Called when no register is entirely free for 'interval': picks the register whose
    // competing intervals have the farthest next use, evicts (splits and spills) them,
    // and assigns that register to 'interval'. If even that is impossible, 'interval'
    // itself is split and spilled instead.
    void allocLockedRegister(Interval interval) {
        if (C1XOptions.TraceLinearScanLevel >= 2) {
            TTY.println("need to split and spill to get register for " + interval.logString(allocator));
        }

        // collect current usage of registers
        initUseLists(false);
        spillExcludeActiveFixed();
        // spillBlockUnhandledFixed(cur);
        assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0";
        spillBlockInactiveFixed(interval);
        spillCollectActiveAny();
        spillCollectInactiveAny(interval);

        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println(" state of registers:");
            for (CiRegister reg : availableRegs) {
                int i = reg.number;
                TTY.print(" reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i]);
                for (int j = 0; j < spillIntervals[i].size(); j++) {
                    TTY.print("%d ", spillIntervals[i].get(j).operandNumber);
                }
                TTY.println();
            }
        }

        // the register must be free at least until this position
        int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister);
        int regNeededUntil = Math.min(firstUsage, interval.from() + 1);
        int intervalTo = interval.to();
        assert regNeededUntil > 0 && regNeededUntil < Integer.MAX_VALUE : "interval has no use";

        // select the register whose competing intervals have the maximum next use position,
        // skipping the register the interval may already occupy
        CiRegister reg = null;
        CiRegister ignore = interval.location() != null && interval.location().isRegister() ? interval.location().asRegister() : null;
        for (CiRegister availableReg : availableRegs) {
            int number = availableReg.number;
            if (availableReg == ignore) {
                // this register must be ignored
            } else if (usePos[number] > regNeededUntil) {
                if (reg == null || (usePos[number] > usePos[reg.number])) {
                    reg = availableReg;
                }
            }
        }

        if (reg == null || usePos[reg.number] <= firstUsage) {
            // the first use of cur is later than the spilling position -> spill cur
            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println("able to spill current interval. firstUsage(register): %d, usePos: %d", firstUsage, reg == null ? 0 : usePos[reg.number]);
            }

            if (firstUsage <= interval.from() + 1) {
                // interval is needed in a register right at its start and none is available:
                // assertion failure in debug builds, bailout in product mode
                assert false : "cannot spill interval that is used in first instruction (possible reason: no register found) firstUsage=" + firstUsage + ", interval.from()=" + interval.from();
                // assign a reasonable register and do a bailout in product mode to avoid errors
                allocator.assignSpillSlot(interval);
                throw new CiBailout("LinearScan: no register found");
            }

            splitAndSpillInterval(interval);
            return;
        }

        // the register becomes blocked before the interval ends -> the interval must be split there
        boolean needSplit = blockPos[reg.number] <= intervalTo;

        int splitPos = blockPos[reg.number];

        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println("decided to use register %d", reg.number);
        }
        assert splitPos > 0 : "invalid splitPos";
        assert needSplit || splitPos > interval.from() : "splitting interval at from";

        interval.assignLocation(reg.asValue(interval.kind()));
        if (needSplit) {
            // register not available for full interval : so split it
            splitWhenPartialRegisterAvailable(interval, splitPos);
        }

        // perform splitting and spilling for all affected intervals
        splitAndSpillIntersectingIntervals(reg);
    }
792 | |
793 boolean noAllocationPossible(Interval interval) { | |
794 | |
795 if (compilation.target.arch.isX86()) { | |
796 // fast calculation of intervals that can never get a register because the | |
797 // the next instruction is a call that blocks all registers | |
798 // Note: this does not work if callee-saved registers are available (e.g. on Sparc) | |
799 | |
800 // check if this interval is the result of a split operation | |
801 // (an interval got a register until this position) | |
802 int pos = interval.from(); | |
803 if (isOdd(pos)) { | |
804 // the current instruction is a call that blocks all registers | |
805 if (pos < allocator.maxOpId() && allocator.hasCall(pos + 1) && interval.to() > pos + 1) { | |
806 if (C1XOptions.TraceLinearScanLevel >= 4) { | |
807 TTY.println(" free register cannot be available because all registers blocked by following call"); | |
808 } | |
809 | |
810 // safety check that there is really no register available | |
811 assert !allocFreeRegister(interval) : "found a register for this interval"; | |
812 return true; | |
813 } | |
814 | |
815 } | |
816 } | |
817 return false; | |
818 } | |
819 | |
820 void initVarsForAlloc(Interval interval) { | |
821 EnumMap<RegisterFlag, CiRegister[]> categorizedRegs = allocator.compilation.registerConfig.getCategorizedAllocatableRegisters(); | |
822 if (allocator.operands.mustBeByteRegister(interval.operand)) { | |
823 assert interval.kind() != CiKind.Float && interval.kind() != CiKind.Double : "cpu regs only"; | |
824 availableRegs = categorizedRegs.get(RegisterFlag.Byte); | |
825 } else if (interval.kind() == CiKind.Float || interval.kind() == CiKind.Double) { | |
826 availableRegs = categorizedRegs.get(RegisterFlag.FPU); | |
827 } else { | |
828 availableRegs = categorizedRegs.get(RegisterFlag.CPU); | |
829 } | |
830 } | |
831 | |
832 boolean isMove(LIRInstruction op, Interval from, Interval to) { | |
833 if (op.code != LIROpcode.Move) { | |
834 return false; | |
835 } | |
836 assert op instanceof LIROp1 : "move must be LIROp1"; | |
837 | |
838 CiValue input = ((LIROp1) op).operand(); | |
839 CiValue result = ((LIROp1) op).result(); | |
840 return input.isVariable() && result.isVariable() && input == from.operand && result == to.operand; | |
841 } | |
842 | |
843 // optimization (especially for phi functions of nested loops): | |
844 // assign same spill slot to non-intersecting intervals | |
845 void combineSpilledIntervals(Interval interval) { | |
846 if (interval.isSplitChild()) { | |
847 // optimization is only suitable for split parents | |
848 return; | |
849 } | |
850 | |
851 Interval registerHint = interval.locationHint(false, allocator); | |
852 if (registerHint == null) { | |
853 // cur is not the target of a move : otherwise registerHint would be set | |
854 return; | |
855 } | |
856 assert registerHint.isSplitParent() : "register hint must be split parent"; | |
857 | |
858 if (interval.spillState() != SpillState.NoOptimization || registerHint.spillState() != SpillState.NoOptimization) { | |
859 // combining the stack slots for intervals where spill move optimization is applied | |
860 // is not benefitial and would cause problems | |
861 return; | |
862 } | |
863 | |
864 int beginPos = interval.from(); | |
865 int endPos = interval.to(); | |
866 if (endPos > allocator.maxOpId() || isOdd(beginPos) || isOdd(endPos)) { | |
867 // safety check that lirOpWithId is allowed | |
868 return; | |
869 } | |
870 | |
871 if (!isMove(allocator.instructionForId(beginPos), registerHint, interval) || !isMove(allocator.instructionForId(endPos), interval, registerHint)) { | |
872 // cur and registerHint are not connected with two moves | |
873 return; | |
874 } | |
875 | |
876 Interval beginHint = registerHint.getSplitChildAtOpId(beginPos, LIRInstruction.OperandMode.Input, allocator); | |
877 Interval endHint = registerHint.getSplitChildAtOpId(endPos, LIRInstruction.OperandMode.Output, allocator); | |
878 if (beginHint == endHint || beginHint.to() != beginPos || endHint.from() != endPos) { | |
879 // registerHint must be split : otherwise the re-writing of use positions does not work | |
880 return; | |
881 } | |
882 | |
883 assert beginHint.location() != null : "must have register assigned"; | |
884 assert endHint.location() == null : "must not have register assigned"; | |
885 assert interval.firstUsage(RegisterPriority.MustHaveRegister) == beginPos : "must have use position at begin of interval because of move"; | |
886 assert endHint.firstUsage(RegisterPriority.MustHaveRegister) == endPos : "must have use position at begin of interval because of move"; | |
887 | |
888 if (beginHint.location().isRegister()) { | |
889 // registerHint is not spilled at beginPos : so it would not be benefitial to immediately spill cur | |
890 return; | |
891 } | |
892 assert registerHint.spillSlot() != null : "must be set when part of interval was spilled"; | |
893 | |
894 // modify intervals such that cur gets the same stack slot as registerHint | |
895 // delete use positions to prevent the intervals to get a register at beginning | |
896 interval.setSpillSlot(registerHint.spillSlot()); | |
897 interval.removeFirstUsePos(); | |
898 endHint.removeFirstUsePos(); | |
899 } | |
900 | |
    // allocate a physical register or memory location to an interval
    //
    // Entry point called for each interval when it becomes current. Returns true only when
    // the interval ends up fully assigned to a register (and should be moved to the active
    // list); returns false when it was split or spilled to a stack slot.
    @Override
    boolean activateCurrent() {
        Interval interval = current;
        boolean result = true;

        if (C1XOptions.TraceLinearScanLevel >= 2) {
            TTY.println("+++++ activating interval " + interval.logString(allocator));
        }

        if (C1XOptions.TraceLinearScanLevel >= 4) {
            TTY.println(" splitParent: %s, insertMoveWhenActivated: %b", interval.splitParent().operandNumber, interval.insertMoveWhenActivated());
        }

        final CiValue operand = interval.operand;
        if (interval.location() != null && interval.location().isStackSlot()) {
            // activating an interval that has a stack slot assigned -> split it at first use position
            // used for method parameters
            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println(" interval has spill slot assigned (method parameter) . split it before first use");
            }
            splitStackInterval(interval);
            result = false;

        } else {
            if (operand.isVariable() && allocator.operands.mustStartInMemory((CiVariable) operand)) {
                // interval must begin life in a stack slot
                assert interval.location() == null : "register already assigned";
                allocator.assignSpillSlot(interval);

                if (!allocator.operands.mustStayInMemory((CiVariable) operand)) {
                    // activating an interval that must start in a stack slot but may get a register later
                    // used for lirRoundfp: rounding is done by store to stack and reload later
                    if (C1XOptions.TraceLinearScanLevel >= 4) {
                        TTY.println(" interval must start in stack slot . split it before first use");
                    }
                    splitStackInterval(interval);
                }

                result = false;
            } else if (interval.location() == null) {
                // interval has no assigned register -> normal allocation
                // (this is the normal case for most intervals)
                if (C1XOptions.TraceLinearScanLevel >= 4) {
                    TTY.println(" normal allocation of register");
                }

                // assign same spill slot to non-intersecting intervals
                combineSpilledIntervals(interval);

                initVarsForAlloc(interval);
                if (noAllocationPossible(interval) || !allocFreeRegister(interval)) {
                    // no empty register available.
                    // split and spill another interval so that this interval gets a register
                    allocLockedRegister(interval);
                }

                // spilled intervals need not be moved to the active list
                // (allocLockedRegister either assigned a register or threw a bailout,
                // so location() is non-null here)
                if (!interval.location().isRegister()) {
                    result = false;
                }
            }
        }

        // load spilled values that become active from stack slot to register
        if (interval.insertMoveWhenActivated()) {
            assert interval.isSplitChild();
            assert interval.currentSplitChild() != null;
            assert interval.currentSplitChild().operand != operand : "cannot insert move between same interval";
            if (C1XOptions.TraceLinearScanLevel >= 4) {
                TTY.println("Inserting move from interval %d to %d because insertMoveWhenActivated is set", interval.currentSplitChild().operandNumber, interval.operandNumber);
            }

            insertMove(interval.from(), interval.currentSplitChild(), interval);
        }
        interval.makeCurrentSplitChild();

        return result; // true = interval is moved to active list
    }
979 | |
    // Resolves and appends all pending data-flow moves collected during allocation.
    public void finishAllocation() {
        // must be called when all intervals are allocated
        moveResolver.resolveAndAppendMoves();
    }
984 } |