Mercurial > hg > graal-compiler
comparison graal/GraalCompiler/src/com/sun/c1x/graph/GraphBuilder.java @ 2509:16b9a8b5ad39
Renamings Runtime=>GraalRuntime and Compiler=>GraalCompiler
author | Thomas Wuerthinger <thomas@wuerthinger.net> |
---|---|
date | Wed, 27 Apr 2011 11:50:44 +0200 |
parents | graal/Compiler/src/com/sun/c1x/graph/GraphBuilder.java@9ec15d6914ca |
children | 4fdef1464592 |
comparison
equal
deleted
inserted
replaced
2508:fea94949e0a2 | 2509:16b9a8b5ad39 |
---|---|
1 /* | |
2 * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |
20 * or visit www.oracle.com if you need additional information or have any | |
21 * questions. | |
22 */ | |
23 package com.sun.c1x.graph; | |
24 | |
25 import static com.sun.cri.bytecode.Bytecodes.*; | |
26 import static java.lang.reflect.Modifier.*; | |
27 | |
28 import java.lang.reflect.*; | |
29 import java.util.*; | |
30 | |
31 import com.sun.c1x.*; | |
32 import com.sun.c1x.debug.*; | |
33 import com.sun.c1x.graph.ScopeData.ReturnBlock; | |
34 import com.sun.c1x.ir.*; | |
35 import com.sun.c1x.ir.Value.Flag; | |
36 import com.sun.c1x.opt.*; | |
37 import com.sun.c1x.util.*; | |
38 import com.sun.c1x.value.*; | |
39 import com.sun.cri.bytecode.*; | |
40 import com.sun.cri.bytecode.Bytecodes.JniOp; | |
41 import com.sun.cri.ci.*; | |
42 import com.sun.cri.ri.*; | |
43 import com.sun.cri.ri.RiType.Representation; | |
44 | |
45 /** | |
46 * The {@code GraphBuilder} class parses the bytecode of a method and builds the IR graph. | |
47 * A number of optimizations may be performed during parsing of the bytecode, including value | |
48 * numbering, inlining, constant folding, strength reduction, etc. | |
49 * | |
50 * @author Ben L. Titzer | |
51 * @author Doug Simon | |
52 */ | |
53 public final class GraphBuilder { | |
54 | |
/**
 * The minimum value to which {@link C1XOptions#TraceBytecodeParserLevel} must be set to trace
 * the bytecode instructions as they are parsed.
 */
public static final int TRACELEVEL_INSTRUCTIONS = 1;

/**
 * The minimum value to which {@link C1XOptions#TraceBytecodeParserLevel} must be set to trace
 * the frame state before each bytecode instruction as it is parsed.
 */
public static final int TRACELEVEL_STATE = 2;

final IR ir;                           // the IR graph being built
final C1XCompilation compilation;      // the compilation this graph belongs to
final CiStatistics stats;

/**
 * Map used to implement local value numbering for the current block.
 * Null when {@link C1XOptions#OptLocalValueNumbering} is disabled (see constructor).
 */
final ValueMap localValueMap;

/**
 * Map used for local load elimination (i.e. within the current block).
 * Null when {@link C1XOptions#OptLocalLoadElimination} is disabled (see constructor).
 */
final MemoryMap memoryMap;

final Canonicalizer canonicalizer;     // canonicalizer which does strength reduction + constant folding
ScopeData scopeData;                   // per-scope data; used for inlining
BlockBegin curBlock;                   // the current block
MutableFrameState curState;            // the current execution state
Instruction lastInstr;                 // the last instruction added
final LogStream log;                   // trace stream; null unless bytecode tracing is enabled

boolean skipBlock;                     // skip processing of the rest of this block
private Value rootMethodSynchronizedObject;  // object locked on entry to a synchronized root method
/**
 * Creates a new, initialized, {@code GraphBuilder} instance for a given compilation.
 *
 * @param compilation the compilation
 * @param ir the IR to build the graph into
 */
public GraphBuilder(C1XCompilation compilation, IR ir) {
    this.compilation = compilation;
    this.ir = ir;
    this.stats = compilation.stats;
    // optimization helpers are only allocated when the corresponding option is
    // enabled; all users test these fields for null before use
    this.memoryMap = C1XOptions.OptLocalLoadElimination ? new MemoryMap() : null;
    this.localValueMap = C1XOptions.OptLocalValueNumbering ? new ValueMap() : null;
    this.canonicalizer = C1XOptions.OptCanonicalize ? new Canonicalizer(compilation.runtime, compilation.method, compilation.target) : null;
    log = C1XOptions.TraceBytecodeParserLevel > 0 ? new LogStream(TTY.out()) : null;
}
106 | |
/**
 * Builds the graph for the specified {@code IRScope}.
 *
 * @param scope the top IRScope
 */
public void build(IRScope scope) {
    RiMethod rootMethod = compilation.method;

    if (log != null) {
        log.println();
        log.println("Compiling " + compilation.method);
    }

    // 1. create the start block
    ir.startBlock = new BlockBegin(0, ir.nextBlockNumber());
    BlockBegin startBlock = ir.startBlock;

    // 2. compute the block map and get the entrypoint(s)
    BlockMap blockMap = compilation.getBlockMap(scope.method, compilation.osrBCI);
    BlockBegin stdEntry = blockMap.get(0);
    BlockBegin osrEntry = compilation.osrBCI < 0 ? null : blockMap.get(compilation.osrBCI);
    pushRootScope(scope, blockMap, startBlock);
    MutableFrameState initialState = stateAtEntry(rootMethod);
    startBlock.mergeOrClone(initialState);
    BlockBegin syncHandler = null;

    // 3. setup internal state for appending instructions
    curBlock = startBlock;
    lastInstr = startBlock;
    lastInstr.setNext(null, -1);
    curState = initialState;

    if (isSynchronized(rootMethod.accessFlags())) {
        // 4A.1 add a monitor enter to the start block
        rootMethodSynchronizedObject = synchronizedObject(initialState, compilation.method);
        genMonitorEnter(rootMethodSynchronizedObject, Instruction.SYNCHRONIZATION_ENTRY_BCI);
        // 4A.2 finish the start block
        finishStartBlock(startBlock, stdEntry, osrEntry);

        // 4A.3 setup an exception handler to unlock the root method synchronized object
        syncHandler = new BlockBegin(Instruction.SYNCHRONIZATION_ENTRY_BCI, ir.nextBlockNumber());
        syncHandler.setExceptionEntry();
        syncHandler.setBlockFlag(BlockBegin.BlockFlag.IsOnWorkList);
        syncHandler.setBlockFlag(BlockBegin.BlockFlag.DefaultExceptionHandler);

        // the handler covers the whole method ([0, code.length)) and catches all exception types
        ExceptionHandler h = new ExceptionHandler(new CiExceptionHandler(0, rootMethod.code().length, -1, 0, null));
        h.setEntryBlock(syncHandler);
        scopeData.addExceptionHandler(h);
    } else {
        // 4B.1 simply finish the start block
        finishStartBlock(startBlock, stdEntry, osrEntry);
    }

    // 5. parse the bytecodes, either by inlining a root-method intrinsic or by normal iteration
    C1XIntrinsic intrinsic = C1XOptions.OptIntrinsify ? C1XIntrinsic.getIntrinsic(rootMethod) : null;
    if (intrinsic != null) {
        lastInstr = stdEntry;
        // 6A.1 the root method is an intrinsic; load the parameters onto the stack and try to inline it
        if (C1XOptions.OptIntrinsify && osrEntry == null) {
            // try to inline an Intrinsic node
            boolean isStatic = Modifier.isStatic(rootMethod.accessFlags());
            int argsSize = rootMethod.signature().argumentSlots(!isStatic);
            Value[] args = new Value[argsSize];
            for (int i = 0; i < args.length; i++) {
                args[i] = curState.localAt(i);
            }
            if (tryInlineIntrinsic(rootMethod, args, isStatic, intrinsic)) {
                // intrinsic inlining succeeded, add the return node
                CiKind rt = returnKind(rootMethod).stackKind();
                Value result = null;
                if (rt != CiKind.Void) {
                    result = pop(rt);
                }
                genReturn(result);
                BlockEnd end = (BlockEnd) lastInstr;
                stdEntry.setEnd(end);
                end.setStateAfter(curState.immutableCopy(bci()));
            } else {
                // try intrinsic failed; do the normal parsing
                scopeData.addToWorkList(stdEntry);
                iterateAllBlocks();
            }
        } else {
            // 6B.1 do the normal parsing
            scopeData.addToWorkList(stdEntry);
            iterateAllBlocks();
        }
    } else {
        RiType accessor = openAccessorScope(rootMethod);

        // 6B.1 do the normal parsing
        scopeData.addToWorkList(stdEntry);
        iterateAllBlocks();

        closeAccessorScope(accessor);
    }

    if (syncHandler != null && syncHandler.stateBefore() != null) {
        // generate unlocking code if the exception handler is reachable
        fillSyncHandler(rootMethodSynchronizedObject, syncHandler, false);
    }

    if (compilation.osrBCI >= 0) {
        BlockBegin osrBlock = blockMap.get(compilation.osrBCI);
        assert osrBlock.wasVisited();
        if (!osrBlock.stateBefore().stackEmpty()) {
            // OSR entry with values on the expression stack is not supported
            throw new CiBailout("cannot OSR with non-empty stack");
        }
    }
}
216 | |
/**
 * Clears the accessor type bound by {@link #openAccessorScope}, if one was bound.
 *
 * @param accessor the value previously returned by {@link #openAccessorScope}
 */
private void closeAccessorScope(RiType accessor) {
    if (accessor != null) {
        boundAccessor.set(null);
    }
}

/**
 * Binds the accessor type of the root method (if any) for the duration of parsing
 * and marks the compilation as not typesafe.
 *
 * @param rootMethod the root method of the compilation
 * @return the accessor type that was bound, or {@code null} if the method has none
 */
private RiType openAccessorScope(RiMethod rootMethod) {
    RiType accessor = rootMethod.accessor();
    if (accessor != null) {
        assert boundAccessor.get() == null;
        boundAccessor.set(accessor);

        // What looks like an object receiver in the bytecode may not be a word value
        compilation.setNotTypesafe();
    }
    return accessor;
}
234 | |
/**
 * Appends the {@link Base} instruction that ends the start block and merges the
 * state after it into the standard entry block.
 *
 * @param startBlock the start block (must be the current block)
 * @param stdEntry the standard entry block of the method
 * @param osrEntry the OSR entry block, or {@code null} if this is not an OSR compilation
 */
private void finishStartBlock(BlockBegin startBlock, BlockBegin stdEntry, BlockBegin osrEntry) {
    assert curBlock == startBlock;
    Base base = new Base(stdEntry, osrEntry);
    appendWithoutOptimization(base, 0);
    FrameState stateAfter = curState.immutableCopy(bci());
    base.setStateAfter(stateAfter);
    startBlock.setEnd(base);
    assert stdEntry.stateBefore() == null;
    stdEntry.mergeOrClone(stateAfter);
}
245 | |
/**
 * Initializes {@link #scopeData} and {@link #curBlock} for parsing the root
 * (outermost) scope of the compilation.
 *
 * @param scope the root IR scope
 * @param blockMap the block map for the scope's method
 * @param start the start block
 */
void pushRootScope(IRScope scope, BlockMap blockMap, BlockBegin start) {
    BytecodeStream stream = new BytecodeStream(scope.method.code());
    RiConstantPool constantPool = compilation.runtime.getConstantPool(scope.method);
    scopeData = new ScopeData(null, scope, blockMap, stream, constantPool);
    curBlock = start;
}
252 | |
/**
 * Determines whether the scope currently being parsed declares any exception handlers.
 */
public boolean hasHandler() {
    return scopeData.hasHandler();
}

/**
 * Gets the IR scope currently being parsed.
 */
public IRScope scope() {
    return scopeData.scope;
}

/**
 * Gets the root (outermost) IR scope by following the caller chain.
 */
public IRScope rootScope() {
    IRScope root = scope();
    while (root.caller != null) {
        root = root.caller;
    }
    return root;
}

/**
 * Gets the method of the scope currently being parsed.
 */
public RiMethod method() {
    return scopeData.scope.method;
}

/**
 * Gets the bytecode stream of the scope currently being parsed.
 */
public BytecodeStream stream() {
    return scopeData.stream;
}

/**
 * Gets the bytecode index of the instruction currently being parsed.
 */
public int bci() {
    return scopeData.stream.currentBCI();
}

/**
 * Gets the bytecode index of the next instruction to be parsed.
 */
public int nextBCI() {
    return scopeData.stream.nextBCI();
}
284 | |
// The following helpers delegate to the current frame state, pushing a value onto
// the expression stack. The prefix denotes the kind: i = int, l = long, f = float,
// d = double, a = object, w = word.

private void ipush(Value x) {
    curState.ipush(x);
}

private void lpush(Value x) {
    curState.lpush(x);
}

private void fpush(Value x) {
    curState.fpush(x);
}

private void dpush(Value x) {
    curState.dpush(x);
}

private void apush(Value x) {
    curState.apush(x);
}

private void wpush(Value x) {
    curState.wpush(x);
}

// pushes a value of an explicitly supplied kind
private void push(CiKind kind, Value x) {
    curState.push(kind, x);
}

// pushes the result of a call; void results push nothing, all others are widened
// to their stack kind
private void pushReturn(CiKind kind, Value x) {
    if (kind != CiKind.Void) {
        curState.push(kind.stackKind(), x);
    }
}
318 | |
// The following helpers delegate to the current frame state, popping the value on
// top of the expression stack. The prefix denotes the expected kind: i = int,
// l = long, f = float, d = double, a = object, w = word.

private Value ipop() {
    return curState.ipop();
}

private Value lpop() {
    return curState.lpop();
}

private Value fpop() {
    return curState.fpop();
}

private Value dpop() {
    return curState.dpop();
}

private Value apop() {
    return curState.apop();
}

private Value wpop() {
    return curState.wpop();
}

// pops a value of an explicitly supplied kind
private Value pop(CiKind kind) {
    return curState.pop(kind);
}
346 | |
347 private CiKind peekKind() { | |
348 Value top = curState.stackAt(curState.stackSize() - 1); | |
349 if (top == null) { | |
350 top = curState.stackAt(curState.stackSize() - 2); | |
351 assert top != null; | |
352 assert top.kind.isDoubleWord(); | |
353 } | |
354 return top.kind; | |
355 } | |
356 | |
/**
 * Loads a local variable of the given kind and pushes it onto the expression stack.
 *
 * @param index the local variable index
 * @param kind the kind of the local variable
 */
private void loadLocal(int index, CiKind kind) {
    push(kind, curState.loadLocal(index));
}

/**
 * Pops the top of the expression stack and stores it into a local variable.
 * While parsing a JSR subroutine, extra bookkeeping tracks which local holds the
 * JSR return address so that unsupported jsr/ret constructs can be detected.
 *
 * @param kind the kind of the value being stored
 * @param index the local variable index
 */
private void storeLocal(CiKind kind, int index) {
    if (scopeData.parsingJsr()) {
        // We need to do additional tracking of the location of the return
        // address for jsrs since we don't handle arbitrary jsr/ret
        // constructs. Here we are figuring out in which circumstances we
        // need to bail out.
        if (kind == CiKind.Object) {
            // might be storing the JSR return address
            Value x = curState.xpop();
            if (x.kind.isJsr()) {
                setJsrReturnAddressLocal(index);
                curState.storeLocal(index, x);
            } else {
                // nope, not storing the JSR return address
                assert x.kind.isObject();
                curState.storeLocal(index, x);
                overwriteJsrReturnAddressLocal(index);
            }
            return;
        } else {
            // not storing the JSR return address local, but might overwrite it
            overwriteJsrReturnAddressLocal(index);
        }
    }

    curState.storeLocal(index, pop(kind));
}
388 | |
/**
 * Invalidates the recorded JSR return-address local if the given local is the one
 * currently recorded as holding the return address.
 *
 * @param index the local variable index being overwritten
 */
private void overwriteJsrReturnAddressLocal(int index) {
    if (index == scopeData.jsrEntryReturnAddressLocal()) {
        scopeData.setJsrEntryReturnAddressLocal(-1);
    }
}

/**
 * Records the local variable now holding the JSR return address, bailing out of
 * the compilation if a parent subroutine's return address would be overwritten.
 *
 * @param index the local variable index holding the return address
 */
private void setJsrReturnAddressLocal(int index) {
    scopeData.setJsrEntryReturnAddressLocal(index);

    // Also check parent jsrs (if any) at this time to see whether
    // they are using this local. We don't handle skipping over a
    // ret.
    for (ScopeData cur = scopeData.parent; cur != null && cur.parsingJsr() && cur.scope == scope(); cur = cur.parent) {
        if (cur.jsrEntryReturnAddressLocal() == index) {
            throw new CiBailout("subroutine overwrites return address from previous subroutine");
        }
    }
}
407 | |
/**
 * Collects the exception handlers covering a potentially exception-throwing
 * instruction, walking outward through the (possibly inlined) scopes. The walk
 * stops early as soon as a catch-all handler is found.
 *
 * @param x the instruction that may throw
 * @param bci the bytecode index of the instruction
 * @return the covering handlers, innermost first; possibly empty
 */
List<ExceptionHandler> handleException(Instruction x, int bci) {
    if (!hasHandler()) {
        return Util.uncheckedCast(Collections.EMPTY_LIST);
    }

    ArrayList<ExceptionHandler> exceptionHandlers = new ArrayList<ExceptionHandler>();
    ScopeData curScopeData = scopeData;
    FrameState stateBefore = x.stateBefore();
    int scopeCount = 0;

    assert stateBefore != null : "exception handler state must be available for " + x;
    FrameState state = stateBefore;
    do {
        assert curScopeData.scope == state.scope() : "scopes do not match";
        assert bci == Instruction.SYNCHRONIZATION_ENTRY_BCI || bci == curScopeData.stream.currentBCI() : "invalid bci";

        // join with all potential exception handlers
        List<ExceptionHandler> handlers = curScopeData.exceptionHandlers();
        if (handlers != null) {
            for (ExceptionHandler handler : handlers) {
                if (handler.covers(bci)) {
                    // if the handler covers this bytecode index, add it to the list
                    if (addExceptionHandler(exceptionHandlers, handler, curScopeData, state, scopeCount)) {
                        // if the handler was a default handler, we are done
                        return exceptionHandlers;
                    }
                }
            }
        }
        // pop the scope to the next IRScope level
        // if parsing a JSR, skip scopes until the next IRScope level
        IRScope curScope = curScopeData.scope;
        while (curScopeData.parent != null && curScopeData.parent.scope == curScope) {
            curScopeData = curScopeData.parent;
        }
        if (curScopeData.parent == null) {
            // no more levels, done
            break;
        }
        // there is another level, pop
        state = state.callerState();
        bci = curScopeData.scope.callerBCI();
        curScopeData = curScopeData.parent;
        scopeCount++;

    } while (true);

    return exceptionHandlers;
}
457 | |
/**
 * Adds an exception handler to the {@linkplain BlockBegin#exceptionHandlerBlocks() list}
 * of exception handlers for the {@link #curBlock current block}.
 *
 * @param exceptionHandlers the handlers collected so far; a clone of {@code handler} is appended to it
 * @param handler the handler that covers the current bytecode index
 * @param curScopeData the scope data of the scope declaring the handler
 * @param curState the current state with empty stack
 * @param scopeCount the number of scope levels popped to reach the handler's scope
 * @return {@code true} if handler catches all exceptions (i.e. {@code handler.isCatchAll() == true})
 */
private boolean addExceptionHandler(ArrayList<ExceptionHandler> exceptionHandlers, ExceptionHandler handler, ScopeData curScopeData, FrameState curState, int scopeCount) {
    compilation.setHasExceptionHandlers();

    BlockBegin entry = handler.entryBlock();
    FrameState entryState = entry.stateBefore();

    assert entry.bci() == handler.handler.handlerBCI();
    assert entry.bci() == -1 || entry == curScopeData.blockAt(entry.bci()) : "blocks must correspond";
    assert entryState == null || curState.locksSize() == entryState.locksSize() : "locks do not match";

    // exception handler starts with an empty expression stack
    curState = curState.immutableCopyWithEmptyStack();

    entry.mergeOrClone(curState);

    // add current state for correct handling of phi functions
    int phiOperand = entry.addExceptionState(curState);

    // add entry to the list of exception handlers of this block
    curBlock.addExceptionHandler(entry);

    // add back-edge from exception handler entry to this block
    if (!entry.predecessors().contains(curBlock)) {
        entry.addPredecessor(curBlock);
    }

    // clone exception handler
    ExceptionHandler newHandler = new ExceptionHandler(handler);
    newHandler.setPhiOperand(phiOperand);
    newHandler.setScopeCount(scopeCount);
    exceptionHandlers.add(newHandler);

    // fill in exception handler subgraph lazily
    if (!entry.wasVisited()) {
        curScopeData.addToWorkList(entry);
    } else {
        // This will occur for exception handlers that cover themselves. This code
        // pattern is generated by javac for synchronized blocks. See the following
        // for why this change to javac was made:
        //
        // http://www.cs.arizona.edu/projects/sumatra/hallofshame/java-async-race.html
    }

    // stop when reaching catch all
    return handler.isCatchAll();
}
515 | |
516 void genLoadConstant(int cpi) { | |
517 Object con = constantPool().lookupConstant(cpi); | |
518 | |
519 if (con instanceof RiType) { | |
520 // this is a load of class constant which might be unresolved | |
521 RiType riType = (RiType) con; | |
522 if (!riType.isResolved() || C1XOptions.TestPatching) { | |
523 push(CiKind.Object, append(new ResolveClass(riType, RiType.Representation.JavaClass, null))); | |
524 } else { | |
525 push(CiKind.Object, append(new Constant(riType.getEncoding(Representation.JavaClass)))); | |
526 } | |
527 } else if (con instanceof CiConstant) { | |
528 CiConstant constant = (CiConstant) con; | |
529 push(constant.kind.stackKind(), appendConstant(constant)); | |
530 } else { | |
531 throw new Error("lookupConstant returned an object of incorrect type"); | |
532 } | |
533 } | |
534 | |
535 void genLoadIndexed(CiKind kind) { | |
536 FrameState stateBefore = curState.immutableCopy(bci()); | |
537 Value index = ipop(); | |
538 Value array = apop(); | |
539 Value length = null; | |
540 if (cseArrayLength(array)) { | |
541 length = append(new ArrayLength(array, stateBefore)); | |
542 } | |
543 Value v = append(new LoadIndexed(array, index, length, kind, stateBefore)); | |
544 push(kind.stackKind(), v); | |
545 } | |
546 | |
547 void genStoreIndexed(CiKind kind) { | |
548 FrameState stateBefore = curState.immutableCopy(bci()); | |
549 Value value = pop(kind.stackKind()); | |
550 Value index = ipop(); | |
551 Value array = apop(); | |
552 Value length = null; | |
553 if (cseArrayLength(array)) { | |
554 length = append(new ArrayLength(array, stateBefore)); | |
555 } | |
556 StoreIndexed result = new StoreIndexed(array, index, length, kind, value, stateBefore); | |
557 append(result); | |
558 if (memoryMap != null) { | |
559 memoryMap.storeValue(value); | |
560 } | |
561 } | |
562 | |
/**
 * Implements the untyped stack manipulation bytecodes (POP, POP2, DUP*, SWAP)
 * directly on the current frame state. The comments below show the stack effect
 * with the rightmost slot on top, matching the JVM specification.
 *
 * @param opcode the stack manipulation opcode
 */
void stackOp(int opcode) {
    switch (opcode) {
        case POP: {
            // ..., w -> ...
            curState.xpop();
            break;
        }
        case POP2: {
            // ..., w2, w1 -> ...
            curState.xpop();
            curState.xpop();
            break;
        }
        case DUP: {
            // ..., w -> ..., w, w
            Value w = curState.xpop();
            curState.xpush(w);
            curState.xpush(w);
            break;
        }
        case DUP_X1: {
            // ..., w2, w1 -> ..., w1, w2, w1
            Value w1 = curState.xpop();
            Value w2 = curState.xpop();
            curState.xpush(w1);
            curState.xpush(w2);
            curState.xpush(w1);
            break;
        }
        case DUP_X2: {
            // ..., w3, w2, w1 -> ..., w1, w3, w2, w1
            Value w1 = curState.xpop();
            Value w2 = curState.xpop();
            Value w3 = curState.xpop();
            curState.xpush(w1);
            curState.xpush(w3);
            curState.xpush(w2);
            curState.xpush(w1);
            break;
        }
        case DUP2: {
            // ..., w2, w1 -> ..., w2, w1, w2, w1
            Value w1 = curState.xpop();
            Value w2 = curState.xpop();
            curState.xpush(w2);
            curState.xpush(w1);
            curState.xpush(w2);
            curState.xpush(w1);
            break;
        }
        case DUP2_X1: {
            // ..., w3, w2, w1 -> ..., w2, w1, w3, w2, w1
            Value w1 = curState.xpop();
            Value w2 = curState.xpop();
            Value w3 = curState.xpop();
            curState.xpush(w2);
            curState.xpush(w1);
            curState.xpush(w3);
            curState.xpush(w2);
            curState.xpush(w1);
            break;
        }
        case DUP2_X2: {
            // ..., w4, w3, w2, w1 -> ..., w2, w1, w4, w3, w2, w1
            Value w1 = curState.xpop();
            Value w2 = curState.xpop();
            Value w3 = curState.xpop();
            Value w4 = curState.xpop();
            curState.xpush(w2);
            curState.xpush(w1);
            curState.xpush(w4);
            curState.xpush(w3);
            curState.xpush(w2);
            curState.xpush(w1);
            break;
        }
        case SWAP: {
            // ..., w2, w1 -> ..., w1, w2
            Value w1 = curState.xpop();
            Value w2 = curState.xpop();
            curState.xpush(w1);
            curState.xpush(w2);
            break;
        }
        default:
            throw Util.shouldNotReachHere();
    }

}
643 | |
/**
 * Generates the instructions for a two-operand arithmetic bytecode that cannot
 * trap (no state required).
 */
void genArithmeticOp(CiKind kind, int opcode) {
    genArithmeticOp(kind, opcode, null);
}

/**
 * Generates the instructions for a two-operand arithmetic bytecode whose operands
 * and result all have the same kind.
 *
 * @param state the state before the operation; non-null for trapping operations
 */
void genArithmeticOp(CiKind kind, int opcode, FrameState state) {
    genArithmeticOp(kind, opcode, kind, kind, state);
}

/**
 * Generates the instructions for a two-operand arithmetic bytecode.
 *
 * @param result the kind of the result
 * @param opcode the arithmetic opcode
 * @param x the kind of the first (deeper) operand
 * @param y the kind of the second (top-of-stack) operand
 * @param state the state before the operation; non-null for trapping operations
 */
void genArithmeticOp(CiKind result, int opcode, CiKind x, CiKind y, FrameState state) {
    Value yValue = pop(y);
    Value xValue = pop(x);
    Value result1 = append(new ArithmeticOp(opcode, result, xValue, yValue, isStrict(method().accessFlags()), state));
    push(result, result1);
}
658 | |
/**
 * Generates the instructions for a negation bytecode (e.g. INEG).
 */
void genNegateOp(CiKind kind) {
    push(kind, append(new NegateOp(pop(kind))));
}

/**
 * Generates the instructions for a shift bytecode (e.g. ISHL); the shift distance
 * is always an int.
 */
void genShiftOp(CiKind kind, int opcode) {
    Value s = ipop();
    Value x = pop(kind);
    // note that strength reduction of e << K >>> K is correctly handled in canonicalizer now
    push(kind, append(new ShiftOp(opcode, x, s)));
}

/**
 * Generates the instructions for a bitwise logic bytecode (e.g. IAND, LXOR).
 */
void genLogicOp(CiKind kind, int opcode) {
    Value y = pop(kind);
    Value x = pop(kind);
    push(kind, append(new LogicOp(opcode, x, y)));
}

/**
 * Generates the instructions for a comparison bytecode (e.g. LCMP, FCMPG).
 * Nothing is pushed when the result kind is void.
 */
void genCompareOp(CiKind kind, int opcode, CiKind resultKind) {
    Value y = pop(kind);
    Value x = pop(kind);
    Value value = append(new CompareOp(opcode, resultKind, x, y));
    if (!resultKind.isVoid()) {
        ipush(value);
    }
}

/**
 * Generates the instructions for an unsigned comparison, pushing an int result.
 */
void genUnsignedCompareOp(CiKind kind, int opcode, int op) {
    Value y = pop(kind);
    Value x = pop(kind);
    ipush(append(new UnsignedCompareOp(opcode, op, x, y)));
}

/**
 * Generates the instructions for a primitive conversion bytecode (e.g. I2L, D2F).
 */
void genConvert(int opcode, CiKind from, CiKind to) {
    CiKind tt = to.stackKind();
    push(tt, append(new Convert(opcode, pop(from.stackKind()), tt)));
}
695 | |
696 void genIncrement() { | |
697 int index = stream().readLocalIndex(); | |
698 int delta = stream().readIncrement(); | |
699 Value x = curState.localAt(index); | |
700 Value y = append(Constant.forInt(delta)); | |
701 curState.storeLocal(index, append(new ArithmeticOp(IADD, CiKind.Int, x, y, isStrict(method().accessFlags()), null))); | |
702 } | |
703 | |
/**
 * Generates the instructions for a GOTO bytecode.
 *
 * @param fromBCI the bytecode index of the goto itself
 * @param toBCI the destination bytecode index
 */
void genGoto(int fromBCI, int toBCI) {
    // a backward branch is a safepoint poll site, unless safepoints are suppressed
    // for this scope
    boolean isSafepoint = !scopeData.noSafepoints() && toBCI <= fromBCI;
    append(new Goto(blockAt(toBCI), null, isSafepoint));
}
708 | |
709 void ifNode(Value x, Condition cond, Value y, FrameState stateBefore) { | |
710 BlockBegin tsucc = blockAt(stream().readBranchDest()); | |
711 BlockBegin fsucc = blockAt(stream().nextBCI()); | |
712 int bci = stream().currentBCI(); | |
713 boolean isSafepoint = !scopeData.noSafepoints() && tsucc.bci() <= bci || fsucc.bci() <= bci; | |
714 append(new If(x, cond, false, y, tsucc, fsucc, isSafepoint ? stateBefore : null, isSafepoint)); | |
715 } | |
716 | |
/**
 * Generates the instructions for an IF{EQ,NE,LT,GE,GT,LE} bytecode, comparing an
 * int against zero.
 */
void genIfZero(Condition cond) {
    Value y = appendConstant(CiConstant.INT_0);
    FrameState stateBefore = curState.immutableCopy(bci());
    Value x = ipop();
    ifNode(x, cond, y, stateBefore);
}

/**
 * Generates the instructions for an IFNULL/IFNONNULL bytecode.
 */
void genIfNull(Condition cond) {
    FrameState stateBefore = curState.immutableCopy(bci());
    Value y = appendConstant(CiConstant.NULL_OBJECT);
    Value x = apop();
    ifNode(x, cond, y, stateBefore);
}

/**
 * Generates the instructions for an IF_ICMPxx or IF_ACMPxx bytecode, comparing the
 * two values on top of the stack.
 */
void genIfSame(CiKind kind, Condition cond) {
    FrameState stateBefore = curState.immutableCopy(bci());
    Value y = pop(kind);
    Value x = pop(kind);
    ifNode(x, cond, y, stateBefore);
}
737 | |
/**
 * Generates the instructions for an ATHROW bytecode.
 *
 * @param bci the bytecode index of the throw
 */
void genThrow(int bci) {
    FrameState stateBefore = curState.immutableCopy(bci());
    Throw t = new Throw(apop(), stateBefore, !scopeData.noSafepoints());
    appendWithoutOptimization(t, bci);
}
743 | |
/**
 * Generates the instructions for a call to a method annotated with @UNSAFE_CAST,
 * reinterpreting the single input value as the method's return type. Marks the
 * compilation as not typesafe.
 *
 * @param method the @UNSAFE_CAST method being "called"
 */
void genUnsafeCast(RiMethod method) {
    compilation.setNotTypesafe();
    RiSignature signature = method.signature();
    int argCount = signature.argumentCount(false);
    RiType accessingClass = scope().method.holder();
    RiType fromType;
    RiType toType = signature.returnType(accessingClass);
    if (argCount == 1) {
        // the value being cast is the single explicit argument
        fromType = signature.argumentTypeAt(0, accessingClass);
    } else {
        // no explicit arguments: the value being cast is the receiver itself
        // (NOTE(review): the message apparently counts the receiver as the one
        // argument — confirm against the @UNSAFE_CAST contract)
        assert argCount == 0 : "method with @UNSAFE_CAST must have exactly 1 argument";
        fromType = method.holder();
    }
    CiKind from = fromType.kind();
    CiKind to = toType.kind();
    // the cast is a no-op if both kinds share the same machine representation
    boolean redundant = compilation.archKindsEqual(to, from);
    curState.push(to, append(new UnsafeCast(toType, curState.pop(from), redundant)));
}
762 | |
/**
 * Generates the instructions for a CHECKCAST bytecode.
 */
void genCheckCast() {
    int cpi = stream().readCPI();
    RiType type = constantPool().lookupType(cpi, CHECKCAST);
    boolean isInitialized = !C1XOptions.TestPatching && type.isResolved() && type.isInitialized();
    Value typeInstruction = genResolveClass(RiType.Representation.ObjectHub, type, isInitialized, cpi);
    CheckCast c = new CheckCast(type, typeInstruction, apop(), null);
    apush(append(c));
    checkForDirectCompare(c);
}

/**
 * Generates the instructions for an INSTANCEOF bytecode; pushes an int result.
 */
void genInstanceOf() {
    int cpi = stream().readCPI();
    RiType type = constantPool().lookupType(cpi, INSTANCEOF);
    boolean isInitialized = !C1XOptions.TestPatching && type.isResolved() && type.isInitialized();
    Value typeInstruction = genResolveClass(RiType.Representation.ObjectHub, type, isInitialized, cpi);
    InstanceOf i = new InstanceOf(type, typeInstruction, apop(), null);
    ipush(append(i));
    checkForDirectCompare(i);
}
782 | |
783 private void checkForDirectCompare(TypeCheck check) { | |
784 RiType type = check.targetClass(); | |
785 if (!type.isResolved() || type.isArrayClass()) { | |
786 return; | |
787 } | |
788 if (assumeLeafClass(type)) { | |
789 check.setDirectCompare(); | |
790 } | |
791 } | |
792 | |
/**
 * Generates the instructions for a NEW bytecode.
 *
 * @param cpi the constant pool index of the instantiated type
 */
void genNewInstance(int cpi) {
    FrameState stateBefore = curState.immutableCopy(bci());
    RiType type = constantPool().lookupType(cpi, NEW);
    NewInstance n = new NewInstance(type, cpi, constantPool(), stateBefore);
    if (memoryMap != null) {
        // a fresh allocation cannot alias previously loaded memory locations
        memoryMap.newInstance(n);
    }
    apush(append(n));
}

/**
 * Generates the instructions for a NEWARRAY bytecode (array of a primitive type).
 *
 * @param typeCode the array type code operand from the bytecode stream
 */
void genNewTypeArray(int typeCode) {
    FrameState stateBefore = curState.immutableCopy(bci());
    CiKind kind = CiKind.fromArrayTypeCode(typeCode);
    RiType elementType = compilation.runtime.asRiType(kind);
    apush(append(new NewTypeArray(ipop(), elementType, stateBefore)));
}

/**
 * Generates the instructions for an ANEWARRAY bytecode (array of a reference type).
 *
 * @param cpi the constant pool index of the element type
 */
void genNewObjectArray(int cpi) {
    RiType type = constantPool().lookupType(cpi, ANEWARRAY);
    FrameState stateBefore = curState.immutableCopy(bci());
    NewArray n = new NewObjectArray(type, ipop(), stateBefore);
    apush(append(n));
}
816 | |
817 void genNewMultiArray(int cpi) { | |
818 RiType type = constantPool().lookupType(cpi, MULTIANEWARRAY); | |
819 FrameState stateBefore = curState.immutableCopy(bci()); | |
820 int rank = stream().readUByte(bci() + 3); | |
821 Value[] dims = new Value[rank]; | |
822 for (int i = rank - 1; i >= 0; i--) { | |
823 dims[i] = ipop(); | |
824 } | |
825 NewArray n = new NewMultiArray(type, dims, stateBefore, cpi, constantPool()); | |
826 apush(append(n)); | |
827 } | |
828 | |
/**
 * Generates the instructions for a GETFIELD bytecode.
 *
 * @param cpi the constant pool index of the field
 * @param field the field being read
 */
void genGetField(int cpi, RiField field) {
    // Must copy the state here, because the field holder must still be on the stack.
    FrameState stateBefore = curState.immutableCopy(bci());
    boolean isLoaded = !C1XOptions.TestPatching && field.isResolved();
    LoadField load = new LoadField(apop(), field, false, stateBefore, isLoaded);
    appendOptimizedLoadField(field.kind(), load);
}

/**
 * Generates the instructions for a PUTFIELD bytecode.
 *
 * @param cpi the constant pool index of the field
 * @param field the field being written
 */
void genPutField(int cpi, RiField field) {
    // Must copy the state here, because the field holder must still be on the stack.
    FrameState stateBefore = curState.immutableCopy(bci());
    boolean isLoaded = !C1XOptions.TestPatching && field.isResolved();
    Value value = pop(field.kind().stackKind());
    appendOptimizedStoreField(new StoreField(apop(), field, value, false, stateBefore, isLoaded));
}
844 | |
    /**
     * Generates instructions for a GETSTATIC bytecode, folding the read to a
     * constant when the field's value is known and constant folding is enabled.
     *
     * @param cpi the constant pool index of the field
     * @param field the static field being read
     */
    void genGetStatic(int cpi, RiField field) {
        RiType holder = field.holder();
        // The load can only be treated as initialized if both field and holder are resolved
        // and the holder class has completed static initialization.
        boolean isInitialized = !C1XOptions.TestPatching && field.isResolved() && holder.isResolved() && holder.isInitialized();
        CiConstant constantValue = null;
        if (isInitialized && C1XOptions.CanonicalizeConstantFields) {
            constantValue = field.constantValue(null);
        }
        if (constantValue != null) {
            // Fold the read: push the constant instead of emitting a load.
            push(constantValue.kind.stackKind(), appendConstant(constantValue));
        } else {
            Value container = genResolveClass(RiType.Representation.StaticFields, holder, isInitialized, cpi);
            LoadField load = new LoadField(container, field, true, null, isInitialized);
            appendOptimizedLoadField(field.kind(), load);
        }
    }
860 | |
    /**
     * Generates instructions for a PUTSTATIC bytecode.
     *
     * @param cpi the constant pool index of the field
     * @param field the static field being written
     */
    void genPutStatic(int cpi, RiField field) {
        RiType holder = field.holder();
        boolean isInitialized = !C1XOptions.TestPatching && field.isResolved() && holder.isResolved() && holder.isInitialized();
        // Resolve the holder first (may emit a ResolveClass), then pop the value to store.
        Value container = genResolveClass(RiType.Representation.StaticFields, holder, isInitialized, cpi);
        Value value = pop(field.kind().stackKind());
        StoreField store = new StoreField(container, field, value, true, null, isInitialized);
        appendOptimizedStoreField(store);
    }
869 | |
870 private Value genResolveClass(RiType.Representation representation, RiType holder, boolean initialized, int cpi) { | |
871 Value holderInstr; | |
872 if (initialized) { | |
873 holderInstr = appendConstant(holder.getEncoding(representation)); | |
874 } else { | |
875 holderInstr = append(new ResolveClass(holder, representation, null)); | |
876 } | |
877 return holderInstr; | |
878 } | |
879 | |
880 private void appendOptimizedStoreField(StoreField store) { | |
881 if (memoryMap != null) { | |
882 StoreField previous = memoryMap.store(store); | |
883 if (previous == null) { | |
884 // the store is redundant, do not append | |
885 return; | |
886 } | |
887 } | |
888 append(store); | |
889 } | |
890 | |
    /**
     * Appends a field load, consulting the local memory map so that a load whose
     * value is already known can be replaced without emitting an instruction.
     *
     * @param kind the kind of the loaded value
     * @param load the load instruction to (conditionally) append
     */
    private void appendOptimizedLoadField(CiKind kind, LoadField load) {
        if (memoryMap != null) {
            Value replacement = memoryMap.load(load);
            if (replacement != load) {
                // the memory buffer found a replacement for this load (no need to append)
                push(kind.stackKind(), replacement);
                return;
            }
        }
        // append the load to the instruction
        Value optimized = append(load);
        if (memoryMap != null && optimized != load) {
            // local optimization happened, replace its value in the memory map
            memoryMap.setResult(load, optimized);
        }
        push(kind.stackKind(), optimized);
    }
908 | |
    /**
     * Temporary work-around to support the @ACCESSOR Maxine annotation.
     * Rebinds accessor calls and expands pointer builtins in place of a call.
     *
     * @param target the original invocation target
     * @return the (possibly rebound) target, or {@code null} if the call was fully
     *         consumed by expanding a pointer builtin and no invoke should be emitted
     */
    private RiMethod handleInvokeAccessorOrBuiltin(RiMethod target) {
        target = bindAccessorMethod(target);
        if (target.intrinsic() != 0) {
            int intrinsic = target.intrinsic();
            // The low byte of the intrinsic id encodes the extended bytecode opcode.
            int opcode = intrinsic & 0xff;
            switch (opcode) {
                case PREAD  : genLoadPointer(intrinsic); break;
                case PGET   : genLoadPointer(intrinsic); break;
                case PWRITE : genStorePointer(intrinsic); break;
                case PSET   : genStorePointer(intrinsic); break;
                case PCMPSWP: genCompareAndSwap(intrinsic); break;
                default:
                    throw new CiBailout("unknown bytecode " + opcode + " (" + nameOf(opcode) + ")");
            }
            return null;
        }
        return target;
    }
930 | |
    /**
     * Generates instructions for an INVOKESTATIC bytecode, first attempting to
     * intrinsify, fold, or inline the call.
     *
     * @param target the static method being invoked
     * @param cpi the constant pool index of the method reference
     * @param constantPool the constant pool of the calling method
     */
    void genInvokeStatic(RiMethod target, int cpi, RiConstantPool constantPool) {
        target = handleInvokeAccessorOrBuiltin(target);
        if (target == null) {
            // The call was consumed by a builtin expansion.
            return;
        }
        RiType holder = target.holder();
        boolean isInitialized = !C1XOptions.TestPatching && target.isResolved() && holder.isInitialized();
        if (!isInitialized && C1XOptions.ResolveClassBeforeStaticInvoke) {
            // Re-use the same resolution code as for accessing a static field. Even though
            // the result of resolution is not used by the invocation (only the side effect
            // of initialization is required), it can be commoned with static field accesses.
            genResolveClass(RiType.Representation.StaticFields, holder, isInitialized, cpi);
        }

        Value[] args = curState.popArguments(target.signature().argumentSlots(false));
        if (!tryRemoveCall(target, args, true)) {
            if (!tryInline(target, args)) {
                appendInvoke(INVOKESTATIC, target, args, true, cpi, constantPool);
            }
        }
    }
952 | |
953 void genInvokeInterface(RiMethod target, int cpi, RiConstantPool constantPool) { | |
954 target = handleInvokeAccessorOrBuiltin(target); | |
955 if (target == null) { | |
956 return; | |
957 } | |
958 Value[] args = curState.popArguments(target.signature().argumentSlots(true)); | |
959 if (!tryRemoveCall(target, args, false)) { | |
960 genInvokeIndirect(INVOKEINTERFACE, target, args, cpi, constantPool); | |
961 } | |
962 } | |
963 | |
964 void genInvokeVirtual(RiMethod target, int cpi, RiConstantPool constantPool) { | |
965 target = handleInvokeAccessorOrBuiltin(target); | |
966 if (target == null) { | |
967 return; | |
968 } | |
969 Value[] args = curState.popArguments(target.signature().argumentSlots(true)); | |
970 if (!tryRemoveCall(target, args, false)) { | |
971 genInvokeIndirect(INVOKEVIRTUAL, target, args, cpi, constantPool); | |
972 } | |
973 } | |
974 | |
    /**
     * Generates instructions for an INVOKESPECIAL bytecode (constructors, private
     * methods, super calls).
     *
     * @param target the method being invoked
     * @param knownHolder the statically known holder type, passed through to the direct call
     * @param cpi the constant pool index of the method reference
     * @param constantPool the constant pool of the calling method
     */
    void genInvokeSpecial(RiMethod target, RiType knownHolder, int cpi, RiConstantPool constantPool) {
        target = handleInvokeAccessorOrBuiltin(target);
        if (target == null) {
            // The call was consumed by a builtin expansion.
            return;
        }
        Value[] args = curState.popArguments(target.signature().argumentSlots(true));
        if (!tryRemoveCall(target, args, false)) {
            invokeDirect(target, args, knownHolder, cpi, constantPool);
        }
    }
985 | |
    /**
     * Temporary work-around to support the @ACCESSOR Maxine annotation.
     * Holds the {@code com.sun.max.unsafe.Accessor} class if present on the
     * class path, otherwise {@code null}.
     */
    private static final Class<?> Accessor;
    static {
        Class<?> c = null;
        try {
            c = Class.forName("com.sun.max.unsafe.Accessor");
        } catch (ClassNotFoundException e) {
            // Intentionally ignored: absence of the class simply disables the work-around.
        }
        Accessor = c;
    }
998 | |
    /**
     * Temporary work-around to support the @ACCESSOR Maxine annotation.
     * Per-compilation-thread record of the concrete accessor type currently bound
     * while inlining an accessor method (see {@code inlineWithBoundAccessor}).
     */
    private static ThreadLocal<RiType> boundAccessor = new ThreadLocal<RiType>();
1003 | |
1004 /** | |
1005 * Temporary work-around to support the @ACCESSOR Maxine annotation. | |
1006 */ | |
1007 private static RiMethod bindAccessorMethod(RiMethod target) { | |
1008 if (Accessor != null && target.isResolved() && target.holder().javaClass() == Accessor) { | |
1009 RiType accessor = boundAccessor.get(); | |
1010 assert accessor != null : "Cannot compile call to method in " + target.holder() + " without enclosing @ACCESSOR annotated method"; | |
1011 RiMethod newTarget = accessor.resolveMethodImpl(target); | |
1012 assert target != newTarget : "Could not bind " + target + " to a method in " + accessor; | |
1013 target = newTarget; | |
1014 } | |
1015 return target; | |
1016 } | |
1017 | |
    /**
     * Temporary work-around to support the @ACCESSOR Maxine annotation.
     * Inlines {@code target} with its accessor type bound in {@link #boundAccessor}
     * for the duration of the inlining.
     *
     * @return {@code true} if the method had an accessor and was inlined
     */
    private boolean inlineWithBoundAccessor(RiMethod target, Value[] args, boolean forcedInline) {
        RiType accessor = target.accessor();
        if (accessor != null) {
            // No nested accessor binding is expected while one is active.
            assert boundAccessor.get() == null;
            boundAccessor.set(accessor);
            try {
                // What looks like an object receiver in the bytecode may not be a word value
                compilation.setNotTypesafe();
                inline(target, args, forcedInline);
            } finally {
                // Always clear the binding, even if inlining bails out.
                boundAccessor.set(null);
            }
            return true;
        }
        return false;
    }
1037 | |
    /**
     * Emits an indirect (virtual/interface) invocation, first attempting a cascade
     * of devirtualization strategies: statically bound targets, exact receiver
     * types, assumed leaf methods, and assumed leaf types.
     *
     * @param opcode the invoke opcode (INVOKEVIRTUAL or INVOKEINTERFACE)
     * @param target the declared call target
     * @param args the popped call arguments; {@code args[0]} is the receiver
     * @param cpi the constant pool index of the method reference
     * @param constantPool the constant pool of the calling method
     */
    private void genInvokeIndirect(int opcode, RiMethod target, Value[] args, int cpi, RiConstantPool constantPool) {
        Value receiver = args[0];
        // attempt to devirtualize the call
        if (target.isResolved()) {
            RiType klass = target.holder();

            // 0. check for trivial cases
            if (target.canBeStaticallyBound() && !isAbstract(target.accessFlags())) {
                // check for trivial cases (e.g. final methods, nonvirtual methods)
                invokeDirect(target, args, target.holder(), cpi, constantPool);
                return;
            }
            // 1. check if the exact type of the receiver can be determined
            RiType exact = getExactType(klass, receiver);
            if (exact != null && exact.isResolved()) {
                // either the holder class is exact, or the receiver object has an exact type
                invokeDirect(exact.resolveMethodImpl(target), args, exact, cpi, constantPool);
                return;
            }
            // 2. check if an assumed leaf method can be found
            RiMethod leaf = getAssumedLeafMethod(target, receiver);
            if (leaf != null && leaf.isResolved() && !isAbstract(leaf.accessFlags()) && leaf.holder().isResolved()) {
                if (C1XOptions.PrintAssumptions) {
                    TTY.println("Optimistic invoke direct because of leaf method to " + leaf);
                }
                invokeDirect(leaf, args, null, cpi, constantPool);
                return;
            } else if (C1XOptions.PrintAssumptions) {
                TTY.println("Could not make leaf method assumption for target=" + target + " leaf=" + leaf + " receiver.declaredType=" + receiver.declaredType());
            }
            // 3. check if the either of the holder or declared type of receiver can be assumed to be a leaf
            exact = getAssumedLeafType(klass, receiver);
            if (exact != null && exact.isResolved()) {
                RiMethod targetMethod = exact.resolveMethodImpl(target);
                if (C1XOptions.PrintAssumptions) {
                    TTY.println("Optimistic invoke direct because of leaf type to " + targetMethod);
                }
                // either the holder class is exact, or the receiver object has an exact type
                invokeDirect(targetMethod, args, exact, cpi, constantPool);
                return;
            } else if (C1XOptions.PrintAssumptions) {
                TTY.println("Could not make leaf type assumption for type " + klass);
            }
        }
        // devirtualization failed, produce an actual invokevirtual
        appendInvoke(opcode, target, args, false, cpi, constantPool);
    }
1085 | |
1086 private CiKind returnKind(RiMethod target) { | |
1087 return target.signature().returnKind(); | |
1088 } | |
1089 | |
    /**
     * Emits a direct (devirtualized) call to {@code target}, first attempting to inline it.
     *
     * NOTE(review): {@code knownHolder} is not read in this method; it appears to be
     * kept for signature compatibility with callers — confirm before removing.
     */
    private void invokeDirect(RiMethod target, Value[] args, RiType knownHolder, int cpi, RiConstantPool constantPool) {
        if (!tryInline(target, args)) {
            // could not optimize or inline the method call
            appendInvoke(INVOKESPECIAL, target, args, false, cpi, constantPool);
        }
    }
1096 | |
    /**
     * Appends an {@link Invoke} instruction for the given target and pushes its
     * result (if any) onto the operand stack.
     */
    private void appendInvoke(int opcode, RiMethod target, Value[] args, boolean isStatic, int cpi, RiConstantPool constantPool) {
        CiKind resultType = returnKind(target);
        Value result = append(new Invoke(opcode, resultType.stackKind(), args, isStatic, target, target.signature().returnType(compilation.method.holder()), null));
        pushReturn(resultType, result);
    }
1102 | |
1103 private RiType getExactType(RiType staticType, Value receiver) { | |
1104 RiType exact = staticType.exactType(); | |
1105 if (exact == null) { | |
1106 exact = receiver.exactType(); | |
1107 if (exact == null) { | |
1108 if (receiver.isConstant()) { | |
1109 exact = compilation.runtime.getTypeOf(receiver.asConstant()); | |
1110 } | |
1111 if (exact == null) { | |
1112 RiType declared = receiver.declaredType(); | |
1113 exact = declared == null || !declared.isResolved() ? null : declared.exactType(); | |
1114 } | |
1115 } | |
1116 } | |
1117 return exact; | |
1118 } | |
1119 | |
1120 private RiType getAssumedLeafType(RiType type) { | |
1121 if (isFinal(type.accessFlags())) { | |
1122 return type; | |
1123 } | |
1124 RiType assumed = null; | |
1125 if (C1XOptions.UseAssumptions) { | |
1126 assumed = type.uniqueConcreteSubtype(); | |
1127 if (assumed != null) { | |
1128 if (C1XOptions.PrintAssumptions) { | |
1129 TTY.println("Recording concrete subtype assumption in context of " + type.name() + ": " + assumed.name()); | |
1130 } | |
1131 compilation.assumptions.recordConcreteSubtype(type, assumed); | |
1132 } | |
1133 } | |
1134 return assumed; | |
1135 } | |
1136 | |
1137 private RiType getAssumedLeafType(RiType staticType, Value receiver) { | |
1138 RiType assumed = getAssumedLeafType(staticType); | |
1139 if (assumed != null) { | |
1140 return assumed; | |
1141 } | |
1142 RiType declared = receiver.declaredType(); | |
1143 if (declared != null && declared.isResolved()) { | |
1144 assumed = getAssumedLeafType(declared); | |
1145 return assumed; | |
1146 } | |
1147 return null; | |
1148 } | |
1149 | |
1150 private RiMethod getAssumedLeafMethod(RiMethod target, Value receiver) { | |
1151 RiMethod assumed = getAssumedLeafMethod(target); | |
1152 if (assumed != null) { | |
1153 return assumed; | |
1154 } | |
1155 RiType declared = receiver.declaredType(); | |
1156 if (declared != null && declared.isResolved() && !declared.isInterface()) { | |
1157 RiMethod impl = declared.resolveMethodImpl(target); | |
1158 if (impl != null) { | |
1159 assumed = getAssumedLeafMethod(impl); | |
1160 } | |
1161 } | |
1162 return assumed; | |
1163 } | |
1164 | |
    /**
     * Emits the finalizer-registration intrinsic at the end of Object.&lt;init&gt;
     * unless type information (or recorded assumptions) proves the receiver's
     * class has no finalizer.
     */
    void callRegisterFinalizer() {
        Value receiver = curState.loadLocal(0);
        RiType declaredType = receiver.declaredType();
        RiType receiverType = declaredType;
        RiType exactType = receiver.exactType();
        if (exactType == null && declaredType != null) {
            exactType = declaredType.exactType();
        }
        if (exactType == null && receiver instanceof Local && ((Local) receiver).javaIndex() == 0) {
            // the exact type isn't known, but the receiver is parameter 0 => use holder
            receiverType = compilation.method.holder();
            exactType = receiverType.exactType();
        }
        boolean needsCheck = true;
        if (exactType != null) {
            // we have an exact type
            needsCheck = exactType.hasFinalizer();
        } else {
            // if either the declared type of receiver or the holder can be assumed to have no finalizers
            if (declaredType != null && !declaredType.hasFinalizableSubclass()) {
                if (compilation.recordNoFinalizableSubclassAssumption(declaredType)) {
                    needsCheck = false;
                }
            }

            if (receiverType != null && !receiverType.hasFinalizableSubclass()) {
                if (compilation.recordNoFinalizableSubclassAssumption(receiverType)) {
                    needsCheck = false;
                }
            }
        }

        if (needsCheck) {
            // append a call to the registration intrinsic
            loadLocal(0, CiKind.Object);
            FrameState stateBefore = curState.immutableCopy(bci());
            append(new Intrinsic(CiKind.Void, C1XIntrinsic.java_lang_Object$init,
                                 null, curState.popArguments(1), false, stateBefore, true, true));
            C1XMetrics.InlinedFinalizerChecks++;
        }
    }
1206 | |
1207 void genReturn(Value x) { | |
1208 if (C1XIntrinsic.getIntrinsic(method()) == C1XIntrinsic.java_lang_Object$init) { | |
1209 callRegisterFinalizer(); | |
1210 } | |
1211 | |
1212 // If inlining, then returns become gotos to the continuation point. | |
1213 if (scopeData.continuation() != null) { | |
1214 if (isSynchronized(method().accessFlags())) { | |
1215 // if the inlined method is synchronized, then the monitor | |
1216 // must be released before jumping to the continuation point | |
1217 assert C1XOptions.OptInlineSynchronized; | |
1218 Value object = curState.lockAt(0); | |
1219 if (object instanceof Instruction) { | |
1220 Instruction obj = (Instruction) object; | |
1221 if (!obj.isAppended()) { | |
1222 appendWithoutOptimization(obj, Instruction.SYNCHRONIZATION_ENTRY_BCI); | |
1223 } | |
1224 } | |
1225 genMonitorExit(object, Instruction.SYNCHRONIZATION_ENTRY_BCI); | |
1226 } | |
1227 | |
1228 // empty stack for return value | |
1229 curState.truncateStack(0); | |
1230 if (x != null) { | |
1231 curState.push(x.kind, x); | |
1232 } | |
1233 Goto gotoCallee = new Goto(scopeData.continuation(), null, false); | |
1234 | |
1235 // ATTN: assumption: curState is not used further down, else add .immutableCopy() | |
1236 scopeData.updateSimpleInlineInfo(curBlock, lastInstr, curState); | |
1237 | |
1238 // State at end of inlined method is the state of the caller | |
1239 // without the method parameters on stack, including the | |
1240 // return value, if any, of the inlined method on operand stack. | |
1241 curState = scopeData.continuationState().copy(); | |
1242 if (x != null) { | |
1243 curState.push(x.kind, x); | |
1244 } | |
1245 | |
1246 // The current bci is in the wrong scope, so use the bci of the continuation point. | |
1247 appendWithoutOptimization(gotoCallee, scopeData.continuation().bci()); | |
1248 return; | |
1249 } | |
1250 | |
1251 curState.truncateStack(0); | |
1252 if (Modifier.isSynchronized(method().accessFlags())) { | |
1253 FrameState stateBefore = curState.immutableCopy(bci()); | |
1254 // unlock before exiting the method | |
1255 int lockNumber = curState.totalLocksSize() - 1; | |
1256 MonitorAddress lockAddress = null; | |
1257 if (compilation.runtime.sizeOfBasicObjectLock() != 0) { | |
1258 lockAddress = new MonitorAddress(lockNumber); | |
1259 append(lockAddress); | |
1260 } | |
1261 append(new MonitorExit(rootMethodSynchronizedObject, lockAddress, lockNumber, stateBefore)); | |
1262 curState.unlock(); | |
1263 } | |
1264 append(new Return(x, !scopeData.noSafepoints())); | |
1265 } | |
1266 | |
    /**
     * Gets the number of locks held in the current frame state.
     *
     * @return the current lock count from {@code curState}
     */
    private int locksSize() {
        return curState.locksSize();
    }
1273 | |
    /**
     * Generates instructions for a monitorenter.
     *
     * @param x the object being locked
     * @param bci the bytecode index of the monitorenter
     */
    void genMonitorEnter(Value x, int bci) {
        int lockNumber = locksSize();
        MonitorAddress lockAddress = null;
        if (compilation.runtime.sizeOfBasicObjectLock() != 0) {
            lockAddress = new MonitorAddress(lockNumber);
            append(lockAddress);
        }
        MonitorEnter monitorEnter = new MonitorEnter(x, lockAddress, lockNumber, null);
        appendWithoutOptimization(monitorEnter, bci);
        // The lock is recorded in the state only after the instruction is appended,
        // and the state-after snapshot includes the new lock.
        curState.lock(scope(), x, lockNumber + 1);
        monitorEnter.setStateAfter(curState.immutableCopy(bci));
        killMemoryMap(); // prevent any optimizations across synchronization
    }
1287 | |
    /**
     * Generates instructions for a monitorexit.
     *
     * @param x the object being unlocked
     * @param bci the bytecode index of the monitorexit
     * @throws CiBailout if there is no lock to release (malformed locking structure)
     */
    void genMonitorExit(Value x, int bci) {
        int lockNumber = curState.totalLocksSize() - 1;
        if (lockNumber < 0) {
            throw new CiBailout("monitor stack underflow");
        }
        MonitorAddress lockAddress = null;
        if (compilation.runtime.sizeOfBasicObjectLock() != 0) {
            lockAddress = new MonitorAddress(lockNumber);
            append(lockAddress);
        }
        appendWithoutOptimization(new MonitorExit(x, lockAddress, lockNumber, null), bci);
        curState.unlock();
        killMemoryMap(); // prevent any optimizations across synchronization
    }
1302 | |
    /**
     * Generates instructions for a JSR bytecode by pushing the return address and
     * inlining the subroutine body.
     *
     * @param dest the bytecode index of the subroutine entry
     * @throws CiBailout if the jsr/ret structure is recursive
     */
    void genJsr(int dest) {
        // Walk enclosing jsr scopes within the same method to detect recursion.
        for (ScopeData cur = scopeData; cur != null && cur.parsingJsr() && cur.scope == scope(); cur = cur.parent) {
            if (cur.jsrEntryBCI() == dest) {
                // the jsr/ret pattern includes a recursive invocation
                throw new CiBailout("recursive jsr/ret structure");
            }
        }
        push(CiKind.Jsr, append(Constant.forJsr(nextBCI())));
        tryInlineJsr(dest);
    }
1313 | |
    /**
     * Generates instructions for a RET bytecode, which is turned into a goto to the
     * jsr continuation block.
     *
     * @param localIndex the local variable holding the return address
     * @throws CiBailout if not inside a subroutine or the structure is too complex
     */
    void genRet(int localIndex) {
        if (!scopeData.parsingJsr()) {
            throw new CiBailout("ret encountered when not parsing subroutine");
        }

        // Only the simple pattern where ret uses the same local the jsr stored into is supported.
        if (localIndex != scopeData.jsrEntryReturnAddressLocal()) {
            throw new CiBailout("jsr/ret structure is too complicated");
        }
        // rets become non-safepoint gotos
        append(new Goto(scopeData.jsrContinuation(), null, false));
    }
1325 | |
    /**
     * Generates instructions for a TABLESWITCH bytecode. The switch is a safepoint
     * if any successor (including the default) is a backward branch.
     */
    void genTableswitch() {
        int bci = bci();
        BytecodeTableSwitch ts = new BytecodeTableSwitch(stream(), bci);
        int max = ts.numberOfCases();
        List<BlockBegin> list = new ArrayList<BlockBegin>(max + 1);
        boolean isBackwards = false;
        for (int i = 0; i < max; i++) {
            // add all successors to the successor list
            int offset = ts.offsetAt(i);
            list.add(blockAt(bci + offset));
            isBackwards |= offset < 0; // track if any of the successors are backwards
        }
        int offset = ts.defaultOffset();
        isBackwards |= offset < 0; // if the default successor is backwards
        list.add(blockAt(bci + offset));
        boolean isSafepoint = isBackwards && !scopeData.noSafepoints();
        // Only a safepointing switch needs a state snapshot.
        FrameState stateBefore = isSafepoint ? curState.immutableCopy(bci()) : null;
        append(new TableSwitch(ipop(), list, ts.lowKey(), stateBefore, isSafepoint));
    }
1345 | |
    /**
     * Generates instructions for a LOOKUPSWITCH bytecode. The switch is a safepoint
     * if any successor (including the default) is a backward branch.
     */
    void genLookupswitch() {
        int bci = bci();
        BytecodeLookupSwitch ls = new BytecodeLookupSwitch(stream(), bci);
        int max = ls.numberOfCases();
        List<BlockBegin> list = new ArrayList<BlockBegin>(max + 1);
        int[] keys = new int[max];
        boolean isBackwards = false;
        for (int i = 0; i < max; i++) {
            // add all successors to the successor list
            int offset = ls.offsetAt(i);
            list.add(blockAt(bci + offset));
            keys[i] = ls.keyAt(i);
            isBackwards |= offset < 0; // track if any of the successors are backwards
        }
        int offset = ls.defaultOffset();
        isBackwards |= offset < 0; // if the default successor is backwards
        list.add(blockAt(bci + offset));
        boolean isSafepoint = isBackwards && !scopeData.noSafepoints();
        // Only a safepointing switch needs a state snapshot.
        FrameState stateBefore = isSafepoint ? curState.immutableCopy(bci()) : null;
        append(new LookupSwitch(ipop(), list, keys, stateBefore, isSafepoint));
    }
1367 | |
1368 /** | |
1369 * Determines whether the length of an array should be extracted out as a separate instruction | |
1370 * before an array indexing instruction. This exposes it to CSE. | |
1371 * @param array | |
1372 * @return | |
1373 */ | |
1374 private boolean cseArrayLength(Value array) { | |
1375 // checks whether an array length access should be generated for CSE | |
1376 if (C1XOptions.OptCSEArrayLength) { | |
1377 // always access the length for CSE | |
1378 return true; | |
1379 } else if (array.isConstant()) { | |
1380 // the array itself is a constant | |
1381 return true; | |
1382 } else if (array instanceof LoadField && ((LoadField) array).constantValue() != null) { | |
1383 // the length is derived from a constant array | |
1384 return true; | |
1385 } else if (array instanceof NewArray) { | |
1386 // the array is derived from an allocation | |
1387 final Value length = ((NewArray) array).length(); | |
1388 return length != null && length.isConstant(); | |
1389 } | |
1390 return false; | |
1391 } | |
1392 | |
    /**
     * Appends a constant instruction for {@code type} without canonicalization.
     */
    private Value appendConstant(CiConstant type) {
        return appendWithBCI(new Constant(type), bci(), false);
    }
1396 | |
    /**
     * Appends {@code x} at the current bci, canonicalizing if OptCanonicalize is enabled.
     */
    private Value append(Instruction x) {
        return appendWithBCI(x, bci(), C1XOptions.OptCanonicalize);
    }
1400 | |
    /**
     * Appends {@code x} at the given bci with canonicalization disabled.
     */
    private Value appendWithoutOptimization(Instruction x, int bci) {
        return appendWithBCI(x, bci, false);
    }
1404 | |
    /**
     * Core append routine: optionally canonicalizes {@code x}, consults local value
     * numbering, links the instruction into the current block, and wires up state
     * snapshots and exception handlers.
     *
     * @param x the instruction to append
     * @param bci the bytecode index to associate with the instruction
     * @param canonicalize whether to attempt constant folding / strength reduction
     * @return the appended instruction, or the value it was replaced by
     * @throws CiBailout if the instruction count exceeds the configured maximum
     */
    private Value appendWithBCI(Instruction x, int bci, boolean canonicalize) {
        if (canonicalize) {
            // attempt simple constant folding and strength reduction
            Value r = canonicalizer.canonicalize(x);
            List<Instruction> extra = canonicalizer.extra();
            if (extra != null) {
                // the canonicalization introduced instructions that should be added before this
                for (Instruction i : extra) {
                    appendWithBCI(i, bci, false); // don't try to canonicalize the new instructions
                }
            }
            if (r instanceof Instruction) {
                // the result is an instruction that may need to be appended
                x = (Instruction) r;
            } else {
                // the result is not an instruction (and thus cannot be appended)
                return r;
            }
        }
        if (x.isAppended()) {
            // the instruction has already been added
            return x;
        }
        if (localValueMap != null) {
            // look in the local value map
            Value r = localValueMap.findInsert(x);
            if (r != x) {
                C1XMetrics.LocalValueNumberHits++;
                if (r instanceof Instruction) {
                    assert ((Instruction) r).isAppended() : "instruction " + r + "is not appended";
                }
                return r;
            }
        }

        assert x.next() == null : "instruction should not have been appended yet";
        assert lastInstr.next() == null : "cannot append instruction to instruction which isn't end (" + lastInstr + "->" + lastInstr.next() + ")";
        if (lastInstr instanceof Base) {
            // Insert before a Base end instruction rather than after it.
            assert x instanceof Intrinsic : "may only happen when inlining intrinsics";
            Instruction prev = lastInstr.prev(lastInstr.block());
            prev.setNext(x, bci);
            x.setNext(lastInstr, bci);
        } else {
            lastInstr = lastInstr.setNext(x, bci);
        }
        if (++stats.nodeCount >= C1XOptions.MaximumInstructionCount) {
            // bailout if we've exceeded the maximum inlining size
            throw new CiBailout("Method and/or inlining is too large");
        }

        if (memoryMap != null && hasUncontrollableSideEffects(x)) {
            // conservatively kill all memory if there are unknown side effects
            memoryMap.kill();
        }

        if (x instanceof StateSplit) {
            StateSplit stateSplit = (StateSplit) x;
            if (!stateSplit.isStateCleared() && stateSplit.stateBefore() == null) {
                stateSplit.setStateBefore(curState.immutableCopy(bci));
            }
        }

        if (x.canTrap()) {
            // connect the instruction to any exception handlers
            x.setExceptionHandlers(handleException(x, bci));
        }

        return x;
    }
1474 | |
1475 private boolean hasUncontrollableSideEffects(Value x) { | |
1476 return x instanceof Invoke || x instanceof Intrinsic && !((Intrinsic) x).preservesState() || x instanceof ResolveClass; | |
1477 } | |
1478 | |
    /**
     * Gets the block beginning at {@code bci}, or {@code null} if no block starts there.
     */
    private BlockBegin blockAtOrNull(int bci) {
        return scopeData.blockAt(bci);
    }
1482 | |
    /**
     * Gets the block beginning at {@code bci}, asserting that one exists.
     */
    private BlockBegin blockAt(int bci) {
        BlockBegin result = blockAtOrNull(bci);
        assert result != null : "Expected a block to begin at " + bci;
        return result;
    }
1488 | |
    /**
     * Inlines the subroutine starting at {@code jsrStart} by parsing it in a nested
     * scope; all ret instructions in the subroutine become gotos to the continuation.
     *
     * @param jsrStart the bytecode index of the subroutine entry
     * @return always {@code true} (the jsr was inlined or a bailout was thrown)
     */
    boolean tryInlineJsr(int jsrStart) {
        // start a new continuation point.
        // all ret instructions will be replaced with gotos to this point
        BlockBegin cont = blockAt(nextBCI());

        // push callee scope
        pushScopeForJsr(cont, jsrStart);

        BlockBegin jsrStartBlock = blockAt(jsrStart);
        assert !jsrStartBlock.wasVisited();
        Goto gotoSub = new Goto(jsrStartBlock, null, false);
        gotoSub.setStateAfter(curState.immutableCopy(bci()));
        assert jsrStartBlock.stateBefore() == null;
        jsrStartBlock.setStateBefore(curState.immutableCopy(bci()));
        append(gotoSub);
        curBlock.setEnd(gotoSub);
        lastInstr = curBlock = jsrStartBlock;

        scopeData.addToWorkList(jsrStartBlock);

        // Parse the entire subroutine before resuming the enclosing method.
        iterateAllBlocks();

        // A non-null stateBefore means some ret reached the continuation.
        if (cont.stateBefore() != null) {
            if (!cont.wasVisited()) {
                scopeData.parent.addToWorkList(cont);
            }
        }

        BlockBegin jsrCont = scopeData.jsrContinuation();
        assert jsrCont == cont && (!jsrCont.wasVisited() || jsrCont.isParserLoopHeader());
        assert lastInstr != null && lastInstr instanceof BlockEnd;

        // continuation is in work list, so end iteration of current block
        skipBlock = true;
        popScopeForJsr();
        C1XMetrics.InlinedJsrs++;
        return true;
    }
1527 | |
    /**
     * Pushes a new {@link ScopeData} for parsing a jsr subroutine, inheriting the
     * enclosing scope's continuation (and a copy of its continuation state).
     *
     * @param jsrCont the block where the subroutine continues after ret
     * @param jsrStart the bytecode index of the subroutine entry
     */
    void pushScopeForJsr(BlockBegin jsrCont, int jsrStart) {
        BytecodeStream stream = new BytecodeStream(scope().method.code());
        RiConstantPool constantPool = scopeData.constantPool;
        ScopeData data = new ScopeData(scopeData, scope(), scopeData.blockMap, stream, constantPool, jsrStart);
        BlockBegin continuation = scopeData.continuation();
        data.setContinuation(continuation);
        if (continuation != null) {
            assert scopeData.continuationState() != null;
            data.setContinuationState(scopeData.continuationState().copy());
        }
        data.setJsrContinuation(jsrCont);
        scopeData = data;
    }
1541 | |
    /**
     * Pushes a new inlining scope for parsing {@code target}'s bytecode.
     *
     * @param target the method being inlined
     * @param continuation the block to continue at after the inlined method returns
     */
    void pushScope(RiMethod target, BlockBegin continuation) {
        // prepare callee scope
        IRScope calleeScope = new IRScope(scope(), curState.immutableCopy(bci()), target, -1);
        BlockMap blockMap = compilation.getBlockMap(calleeScope.method, -1);
        calleeScope.setStoresInLoops(blockMap.getStoresInLoops());
        // prepare callee state
        curState = curState.pushScope(calleeScope);
        BytecodeStream stream = new BytecodeStream(target.code());
        RiConstantPool constantPool = compilation.runtime.getConstantPool(target);
        ScopeData data = new ScopeData(scopeData, calleeScope, blockMap, stream, constantPool);
        data.setContinuation(continuation);
        scopeData = data;
    }
1555 | |
    /**
     * Builds the frame state at method entry: the receiver (for instance methods)
     * followed by the declared parameters, stored into locals by slot index.
     *
     * @param method the method whose entry state is built
     * @return the entry frame state with all parameters in locals
     */
    MutableFrameState stateAtEntry(RiMethod method) {
        MutableFrameState state = new MutableFrameState(scope(), -1, method.maxLocals(), method.maxStackSize());
        int index = 0;
        if (!isStatic(method.accessFlags())) {
            // add the receiver and assume it is non null
            Local local = new Local(method.holder().kind(), index);
            local.setFlag(Value.Flag.NonNull, true);
            local.setDeclaredType(method.holder());
            state.storeLocal(index, local);
            index = 1;
        }
        RiSignature sig = method.signature();
        int max = sig.argumentCount(false);
        RiType accessingClass = method.holder();
        for (int i = 0; i < max; i++) {
            RiType type = sig.argumentTypeAt(i, accessingClass);
            CiKind kind = type.kind().stackKind();
            Local local = new Local(kind, index);
            if (type.isResolved()) {
                local.setDeclaredType(type);
            }
            state.storeLocal(index, local);
            // Long and double parameters occupy two local slots.
            index += kind.sizeInSlots();
        }
        return state;
    }
1582 | |
    /**
     * Attempts to remove a call entirely by intrinsifying it or folding it to a
     * constant.
     *
     * @param target the call target
     * @param args the popped call arguments
     * @param isStatic whether the call is static
     * @return {@code true} if the call was removed and no invoke should be emitted
     */
    boolean tryRemoveCall(RiMethod target, Value[] args, boolean isStatic) {
        if (target.isResolved()) {
            if (C1XOptions.OptIntrinsify) {
                // try to create an intrinsic node instead of a call
                C1XIntrinsic intrinsic = C1XIntrinsic.getIntrinsic(target);
                if (intrinsic != null && tryInlineIntrinsic(target, args, isStatic, intrinsic)) {
                    // the call was successfully replaced by an intrinsic
                    return true;
                }
            }
            if (C1XOptions.CanonicalizeFoldableMethods) {
                // next try to fold the method call
                if (tryFoldable(target, args)) {
                    return true;
                }
            }
        }
        return false;
    }
1602 | |
/**
 * Tries to replace the call to {@code target} with an {@link Intrinsic} node, or with a
 * specialized subgraph for {@code System.arraycopy} / {@code Arrays.copyOf}.
 *
 * Side effects on success: pushes {@code args} onto {@code curState} (to build the
 * state before the intrinsic), pops them again, appends the result node and pushes
 * its value, and bumps {@code stats.intrinsicCount}.
 *
 * @return {@code true} if the call was replaced; {@code false} if a normal invoke must be emitted
 */
private boolean tryInlineIntrinsic(RiMethod target, Value[] args, boolean isStatic, C1XIntrinsic intrinsic) {
    // Defaults used for the long fall-through list below; only a few cases adjust
    // them — see the TODO at the end of the switch.
    boolean preservesState = true;
    boolean canTrap = false;

    Instruction result = null;

    // handle intrinsics differently
    switch (intrinsic) {

        case java_lang_System$arraycopy:
            // only expanded when the runtime supports the array intrinsics
            if (compilation.runtime.supportsArrayIntrinsics()) {
                break;
            } else {
                return false;
            }
        case java_lang_Object$getClass:
            // NOTE(review): presumably because the receiver may be null — confirm
            canTrap = true;
            break;
        case java_lang_Thread$currentThread:
            break;
        case java_util_Arrays$copyOf:
            // requires a statically known array type for genArrayClone below
            if (args[0].declaredType() != null && args[0].declaredType().isArrayClass() && compilation.runtime.supportsArrayIntrinsics()) {
                break;
            } else {
                return false;
            }
        case java_lang_Object$init: // fall through
        case java_lang_String$equals: // fall through
        case java_lang_String$compareTo: // fall through
        case java_lang_String$indexOf: // fall through
        case java_lang_Math$max: // fall through
        case java_lang_Math$min: // fall through
        case java_lang_Math$atan2: // fall through
        case java_lang_Math$pow: // fall through
        case java_lang_Math$exp: // fall through
        case java_nio_Buffer$checkIndex: // fall through
        case java_lang_System$identityHashCode: // fall through
        case java_lang_System$currentTimeMillis: // fall through
        case java_lang_System$nanoTime: // fall through
        case java_lang_Object$hashCode: // fall through
        case java_lang_Class$isAssignableFrom: // fall through
        case java_lang_Class$isInstance: // fall through
        case java_lang_Class$getModifiers: // fall through
        case java_lang_Class$isInterface: // fall through
        case java_lang_Class$isArray: // fall through
        case java_lang_Class$isPrimitive: // fall through
        case java_lang_Class$getSuperclass: // fall through
        case java_lang_Class$getComponentType: // fall through
        case java_lang_reflect_Array$getLength: // fall through
        case java_lang_reflect_Array$newArray: // fall through
        case java_lang_Double$doubleToLongBits: // fall through
        case java_lang_Float$floatToIntBits: // fall through
        case java_lang_Math$sin: // fall through
        case java_lang_Math$cos: // fall through
        case java_lang_Math$tan: // fall through
        case java_lang_Math$log: // fall through
        case java_lang_Math$log10: // fall through
        case java_lang_Integer$bitCount: // fall through
        case java_lang_Integer$reverseBytes: // fall through
        case java_lang_Long$bitCount: // fall through
        case java_lang_Long$reverseBytes: // fall through
        case java_lang_Object$clone: return false;
        // TODO: preservesState and canTrap for complex intrinsics
    }



    // get the arguments for the intrinsic
    CiKind resultType = returnKind(target);

    if (C1XOptions.PrintInlinedIntrinsics) {
        TTY.println("Inlining intrinsic: " + intrinsic);
    }

    // Create state before intrinsic.
    for (int i = 0; i < args.length; ++i) {
        // null entries are skipped — NOTE(review): presumably the second slot of
        // two-slot (long/double) values; confirm against the caller
        if (args[i] != null) {
            curState.push(args[i].kind.stackKind(), args[i]);
        }
    }

    // Create the intrinsic node.
    if (intrinsic == C1XIntrinsic.java_lang_System$arraycopy) {
        result = genArrayCopy(target, args);
    } else if (intrinsic == C1XIntrinsic.java_util_Arrays$copyOf) {
        result = genArrayClone(target, args);
    } else {
        result = new Intrinsic(resultType.stackKind(), intrinsic, target, args, isStatic, curState.immutableCopy(bci()), preservesState, canTrap);
    }

    // Pop arguments.
    curState.popArguments(args.length);

    // append the (possibly not-yet-appended) result and push its value
    pushReturn(resultType, append(result));
    stats.intrinsicCount++;
    return true;
}
1700 | |
1701 private Instruction genArrayClone(RiMethod target, Value[] args) { | |
1702 FrameState state = curState.immutableCopy(bci()); | |
1703 Value array = args[0]; | |
1704 RiType type = array.declaredType(); | |
1705 assert type != null && type.isResolved() && type.isArrayClass(); | |
1706 Value newLength = args[1]; | |
1707 | |
1708 Value oldLength = append(new ArrayLength(array, state)); | |
1709 Value newArray = append(new NewObjectArrayClone(newLength, array, state)); | |
1710 Value copyLength = append(new IfOp(newLength, Condition.LT, oldLength, newLength, oldLength)); | |
1711 append(new ArrayCopy(array, Constant.forInt(0), newArray, Constant.forInt(0), copyLength, null, null)); | |
1712 return (Instruction) newArray; | |
1713 } | |
1714 | |
/**
 * Expands a {@code System.arraycopy} call into explicit length reads and bounds
 * checks followed by an {@link ArrayCopy} node. The checks are appended in a
 * specific sequence; that order is preserved as-is.
 *
 * @param target the System.arraycopy method (recorded in the ArrayCopy node)
 * @param args   {src, srcPos, dest, destPos, length}
 * @return the ArrayCopy instruction — note it is NOT appended here; the caller
 *         ({@code tryInlineIntrinsic}) appends it via {@code append(result)}
 */
private Instruction genArrayCopy(RiMethod target, Value[] args) {
    FrameState state = curState.immutableCopy(bci());
    Instruction result;
    Value src = args[0];
    Value srcPos = args[1];
    Value dest = args[2];
    Value destPos = args[3];
    Value length = args[4];

    // Check src start pos.
    Value srcLength = append(new ArrayLength(src, state));

    // Check dest start pos.
    // reuse the source length when src == dest (same array, same length)
    Value destLength = srcLength;
    if (src != dest) {
        destLength = append(new ArrayLength(dest, state));
    }

    // Check src end pos.
    Value srcEndPos = append(new ArithmeticOp(IADD, CiKind.Int, srcPos, length, false, null));
    append(new BoundsCheck(srcEndPos, srcLength, state, Condition.LE));

    // Check dest end pos.
    // reuse the src end position when the start positions are the same value node
    Value destEndPos = srcEndPos;
    if (destPos != srcPos) {
        destEndPos = append(new ArithmeticOp(IADD, CiKind.Int, destPos, length, false, null));
    }
    append(new BoundsCheck(destEndPos, destLength, state, Condition.LE));

    // all of length, srcPos and destPos must be non-negative
    Value zero = append(Constant.forInt(0));
    append(new BoundsCheck(length, zero, state, Condition.GE));
    append(new BoundsCheck(srcPos, zero, state, Condition.GE));
    append(new BoundsCheck(destPos, zero, state, Condition.GE));

    result = new ArrayCopy(src, srcPos, dest, destPos, length, target, state);
    return result;
}
1752 | |
1753 private boolean tryFoldable(RiMethod target, Value[] args) { | |
1754 CiConstant result = Canonicalizer.foldInvocation(compilation.runtime, target, args); | |
1755 if (result != null) { | |
1756 if (C1XOptions.TraceBytecodeParserLevel > 0) { | |
1757 log.println("|"); | |
1758 log.println("| [folded " + target + " --> " + result + "]"); | |
1759 log.println("|"); | |
1760 } | |
1761 | |
1762 pushReturn(returnKind(target), append(new Constant(result))); | |
1763 return true; | |
1764 } | |
1765 return false; | |
1766 } | |
1767 | |
/**
 * Tries to inline the call to {@code target}. A method the runtime forces inline
 * bypasses {@link #checkInliningConditions} (but recursion through a force-inlined
 * method is a bailout); otherwise the normal inlining heuristics apply.
 *
 * @return {@code true} if the call was inlined
 */
private boolean tryInline(RiMethod target, Value[] args) {
    boolean forcedInline = compilation.runtime.mustInline(target);
    if (forcedInline) {
        // walk the caller chain to detect recursion on a force-inlined method,
        // which could otherwise inline forever
        for (IRScope scope = scope().caller; scope != null; scope = scope.caller) {
            if (scope.method.equals(target)) {
                throw new CiBailout("Cannot recursively inline method that is force-inlined: " + target);
            }
        }
        C1XMetrics.InlineForcedMethods++;
    }
    if (forcedInline || checkInliningConditions(target)) {
        // trace output uses a matched pair of indentation adjustments around the
        // inlined body; the closing block below must mirror this one exactly
        if (C1XOptions.TraceBytecodeParserLevel > 0) {
            log.adjustIndentation(1);
            log.println("\\");
            log.adjustIndentation(1);
            if (C1XOptions.TraceBytecodeParserLevel < TRACELEVEL_STATE) {
                log.println("| [inlining " + target + "]");
                log.println("|");
            }
        }
        // bound accessors (getter/setter via bound receiver) get a cheaper expansion;
        // fall back to full inlining otherwise
        if (!inlineWithBoundAccessor(target, args, forcedInline)) {
            inline(target, args, forcedInline);
        }

        if (C1XOptions.TraceBytecodeParserLevel > 0) {
            if (C1XOptions.TraceBytecodeParserLevel < TRACELEVEL_STATE) {
                log.println("|");
                log.println("| [return to " + curState.scope().method + "]");
            }
            log.adjustIndentation(-1);
            log.println("/");
            log.adjustIndentation(-1);
        }
        return true;
    }
    return false;
}
1805 | |
/**
 * Applies the inlining heuristics to {@code target}. Each failed guard reports its
 * reason through {@link #cannotInline} (which always returns {@code false}).
 *
 * @return {@code true} if {@code target} may be inlined at the current call site
 */
private boolean checkInliningConditions(RiMethod target) {
    if (!C1XOptions.OptInline) {
        return false; // all inlining is turned off
    }
    if (!target.isResolved()) {
        return cannotInline(target, "unresolved method");
    }
    if (target.code() == null) {
        return cannotInline(target, "method has no code");
    }
    if (!target.holder().isInitialized()) {
        return cannotInline(target, "holder is not initialized");
    }
    if (recursiveInlineLevel(target) > C1XOptions.MaximumRecursiveInlineLevel) {
        return cannotInline(target, "recursive inlining too deep");
    }
    if (target.code().length > scopeData.maxInlineSize()) {
        return cannotInline(target, "inlinee too large for this level");
    }
    if (scopeData.scope.level + 1 > C1XOptions.MaximumInlineLevel) {
        return cannotInline(target, "inlining too deep");
    }
    // NOTE(review): the condition reads stats.nodeCount but the message reads
    // compilation.stats.nodeCount — presumably the same object; confirm
    if (stats.nodeCount > C1XOptions.MaximumDesiredSize) {
        return cannotInline(target, "compilation already too big " + "(" + compilation.stats.nodeCount + " nodes)");
    }
    if (compilation.runtime.mustNotInline(target)) {
        C1XMetrics.InlineForbiddenMethods++;
        return cannotInline(target, "inlining excluded by runtime");
    }
    if (compilation.runtime.mustNotCompile(target)) {
        return cannotInline(target, "compile excluded by runtime");
    }
    if (isSynchronized(target.accessFlags()) && !C1XOptions.OptInlineSynchronized) {
        return cannotInline(target, "is synchronized");
    }
    if (target.exceptionHandlers().length != 0 && !C1XOptions.OptInlineExcept) {
        return cannotInline(target, "has exception handlers");
    }
    if (!target.hasBalancedMonitors()) {
        return cannotInline(target, "has unbalanced monitors");
    }
    if (target.isConstructor()) {
        if (compilation.runtime.isExceptionType(target.holder())) {
            // don't inline constructors of throwable classes unless the inlining tree is
            // rooted in a throwable class
            if (!compilation.runtime.isExceptionType(rootScope().method.holder())) {
                return cannotInline(target, "don't inline Throwable constructors");
            }
        }
    }
    return true;
}
1858 | |
1859 private boolean cannotInline(RiMethod target, String reason) { | |
1860 if (C1XOptions.PrintInliningFailures) { | |
1861 TTY.println("Cannot inline " + target.toString() + " into " + compilation.method.toString() + " because of " + reason); | |
1862 } | |
1863 return false; | |
1864 } | |
1865 | |
/**
 * Inlines the body of {@code target} at the current parse position by pushing a new
 * scope, parsing the callee's bytecodes, and wiring the callee's returns to a
 * continuation block in the caller. Handles synchronized callees by emitting
 * lock/unlock code and a synthetic exception handler.
 *
 * Statement order in this method is load-bearing (scope push/pop, state threading,
 * worklist manipulation); it is documented but deliberately left untouched.
 *
 * @param forcedInline whether the runtime forced this inline (skips the receiver null check)
 */
private void inline(RiMethod target, Value[] args, boolean forcedInline) {
    BlockBegin orig = curBlock;
    if (!forcedInline && !isStatic(target.accessFlags())) {
        // the receiver object must be null-checked for instance methods
        Value receiver = args[0];
        if (!receiver.isNonNull() && !receiver.kind.isWord()) {
            NullCheck check = new NullCheck(receiver, null);
            args[0] = append(check);
        }
    }

    // Introduce a new callee continuation point. All return instructions
    // in the callee will be transformed to Goto's to the continuation
    BlockBegin continuationBlock = blockAtOrNull(nextBCI());
    boolean continuationExisted = true;
    if (continuationBlock == null) {
        // there was not already a block starting at the next BCI
        continuationBlock = new BlockBegin(nextBCI(), ir.nextBlockNumber());
        continuationBlock.setDepthFirstNumber(0);
        continuationExisted = false;
    }
    // record the number of predecessors before inlining, to determine
    // whether the inlined method has added edges to the continuation
    int continuationPredecessors = continuationBlock.predecessors().size();

    // push the target scope
    pushScope(target, continuationBlock);

    // pass parameters into the callee state
    FrameState calleeState = curState;
    for (int i = 0; i < args.length; i++) {
        Value arg = args[i];
        if (arg != null) {
            // null entries are skipped — NOTE(review): presumably second slots of
            // long/double arguments; confirm against the caller
            calleeState.storeLocal(i, arg);
        }
    }

    // setup state that is used at returns from the inlined method.
    // this is essentially the state of the continuation block,
    // but without the return value on the stack.
    scopeData.setContinuationState(scope().callerState);

    Value lock = null;
    BlockBegin syncHandler = null;
    // inline the locking code if the target method is synchronized
    if (Modifier.isSynchronized(target.accessFlags())) {
        // lock the receiver object if it is an instance method, the class object otherwise
        lock = synchronizedObject(curState, target);
        syncHandler = new BlockBegin(Instruction.SYNCHRONIZATION_ENTRY_BCI, ir.nextBlockNumber());
        syncHandler.setNext(null, -1);
        inlineSyncEntry(lock, syncHandler);
    }

    BlockBegin calleeStartBlock = blockAt(0);
    if (calleeStartBlock.isParserLoopHeader()) {
        // the block is a loop header, so we have to insert a goto
        Goto gotoCallee = new Goto(calleeStartBlock, null, false);
        gotoCallee.setStateAfter(curState.immutableCopy(bci()));
        appendWithoutOptimization(gotoCallee, 0);
        curBlock.setEnd(gotoCallee);
        calleeStartBlock.mergeOrClone(calleeState);
        lastInstr = curBlock = calleeStartBlock;
        scopeData.addToWorkList(calleeStartBlock);
        // now iterate over all the blocks
        iterateAllBlocks();
    } else {
        // ready to resume parsing inlined method into this block
        iterateBytecodesForBlock(0, true);
        // now iterate over the rest of the blocks
        iterateAllBlocks();
    }

    assert continuationExisted || !continuationBlock.wasVisited() : "continuation should not have been parsed if we created it";

    ReturnBlock simpleInlineInfo = scopeData.simpleInlineInfo();
    if (simpleInlineInfo != null && curBlock == orig) {
        // Optimization: during parsing of the callee we
        // generated at least one Goto to the continuation block. If we
        // generated exactly one, and if the inlined method spanned exactly
        // one block (and we didn't have to Goto its entry), then we snip
        // off the Goto to the continuation, allowing control to fall
        // through back into the caller block and effectively performing
        // block merging. This allows local load elimination and local value numbering
        // to take place across multiple callee scopes if they are relatively simple, and
        // is currently essential to making inlining profitable. It also reduces the
        // number of blocks in the CFG
        lastInstr = simpleInlineInfo.returnPredecessor;
        curState = simpleInlineInfo.returnState.popScope();
        lastInstr.setNext(null, -1);
    } else if (continuationPredecessors == continuationBlock.predecessors().size()) {
        // Inlining caused the instructions after the invoke in the
        // caller to not reachable any more (i.e. no control flow path
        // in the callee was terminated by a return instruction).
        // So skip filling this block with instructions!
        assert continuationBlock == scopeData.continuation();
        assert lastInstr instanceof BlockEnd;
        skipBlock = true;
    } else {
        // Resume parsing in continuation block unless it was already parsed.
        // Note that if we don't change lastInstr here, iteration in
        // iterateBytecodesForBlock will stop when we return.
        if (!scopeData.continuation().wasVisited()) {
            // add continuation to work list instead of parsing it immediately
            assert lastInstr instanceof BlockEnd;
            scopeData.parent.addToWorkList(scopeData.continuation());
            skipBlock = true;
        }
    }

    // fill the exception handler for synchronized methods with instructions
    if (syncHandler != null && syncHandler.stateBefore() != null) {
        // generate unlocking code if the exception handler is reachable
        // (fillSyncHandler pops the inlined scope itself)
        fillSyncHandler(lock, syncHandler, true);
    } else {
        popScope();
    }

    stats.inlineCount++;
}
1985 | |
1986 private Value synchronizedObject(FrameState curState2, RiMethod target) { | |
1987 if (isStatic(target.accessFlags())) { | |
1988 Constant classConstant = new Constant(target.holder().getEncoding(Representation.JavaClass)); | |
1989 return appendWithoutOptimization(classConstant, Instruction.SYNCHRONIZATION_ENTRY_BCI); | |
1990 } else { | |
1991 return curState2.localAt(0); | |
1992 } | |
1993 } | |
1994 | |
1995 private void inlineSyncEntry(Value lock, BlockBegin syncHandler) { | |
1996 genMonitorEnter(lock, Instruction.SYNCHRONIZATION_ENTRY_BCI); | |
1997 syncHandler.setExceptionEntry(); | |
1998 syncHandler.setBlockFlag(BlockBegin.BlockFlag.IsOnWorkList); | |
1999 ExceptionHandler handler = new ExceptionHandler(new CiExceptionHandler(0, method().code().length, -1, 0, null)); | |
2000 handler.setEntryBlock(syncHandler); | |
2001 scopeData.addExceptionHandler(handler); | |
2002 } | |
2003 | |
/**
 * Fills the synthetic exception handler of a synchronized (possibly inlined) method
 * with code that unlocks the monitor and rethrows the exception. Saves and restores
 * the parser's current block/state/instruction around the generation.
 *
 * @param inlinedMethod whether the handler belongs to an inlined callee; if so the
 *                      callee scope is popped before the rethrow
 */
private void fillSyncHandler(Value lock, BlockBegin syncHandler, boolean inlinedMethod) {
    // save parser position; restored at the end of this method
    BlockBegin origBlock = curBlock;
    MutableFrameState origState = curState;
    Instruction origLast = lastInstr;

    lastInstr = curBlock = syncHandler;
    while (lastInstr.next() != null) {
        // go forward to the end of the block
        lastInstr = lastInstr.next();
    }
    curState = syncHandler.stateBefore().copy();

    int bci = Instruction.SYNCHRONIZATION_ENTRY_BCI;
    // materialize the in-flight exception object
    Value exception = appendWithoutOptimization(new ExceptionObject(curState.immutableCopy(bci)), bci);

    assert lock != null;
    assert curState.locksSize() > 0 && curState.lockAt(locksSize() - 1) == lock;
    if (lock instanceof Instruction) {
        Instruction l = (Instruction) lock;
        if (!l.isAppended()) {
            // the lock value may not have been appended yet (e.g. class constant)
            lock = appendWithoutOptimization(l, Instruction.SYNCHRONIZATION_ENTRY_BCI);
        }
    }
    // exit the monitor
    genMonitorExit(lock, Instruction.SYNCHRONIZATION_ENTRY_BCI);

    // exit the context of the synchronized method
    if (inlinedMethod) {
        popScope();
        bci = curState.scope().callerBCI();
        curState = curState.popScope();
    }

    apush(exception);
    genThrow(bci);
    BlockEnd end = (BlockEnd) lastInstr;
    curBlock.setEnd(end);
    // NOTE(review): uses bci() here while the throw above uses the local bci —
    // confirm this mismatch is intentional
    end.setStateAfter(curState.immutableCopy(bci()));

    curBlock = origBlock;
    curState = origState;
    lastInstr = origLast;
}
2047 | |
2048 private void iterateAllBlocks() { | |
2049 BlockBegin b; | |
2050 while ((b = scopeData.removeFromWorkList()) != null) { | |
2051 if (!b.wasVisited()) { | |
2052 if (b.isOsrEntry()) { | |
2053 // this is the OSR entry block, set up edges accordingly | |
2054 setupOsrEntryBlock(); | |
2055 // this is no longer the OSR entry block | |
2056 b.setOsrEntry(false); | |
2057 } | |
2058 b.setWasVisited(true); | |
2059 // now parse the block | |
2060 killMemoryMap(); | |
2061 curBlock = b; | |
2062 curState = b.stateBefore().copy(); | |
2063 lastInstr = b; | |
2064 b.setNext(null, -1); | |
2065 | |
2066 iterateBytecodesForBlock(b.bci(), false); | |
2067 } | |
2068 } | |
2069 } | |
2070 | |
2071 private void popScope() { | |
2072 int maxLocks = scope().maxLocks(); | |
2073 scopeData = scopeData.parent; | |
2074 scope().updateMaxLocks(maxLocks); | |
2075 } | |
2076 | |
// Leaves a JSR scope. Unlike popScope(), no maxLocks propagation is done —
// presumably because a JSR scope shares its enclosing method's frame; confirm.
private void popScopeForJsr() {
    scopeData = scopeData.parent;
}
2080 | |
/**
 * Creates and populates {@code ir.osrEntryBlock}: an entry block that loads the
 * interpreter's locals out of the OSR state buffer (via {@link UnsafeGetRaw}) and
 * jumps to the block at the OSR bci.
 */
private void setupOsrEntryBlock() {
    assert compilation.isOsrCompilation();

    int osrBCI = compilation.osrBCI;
    BytecodeStream s = scopeData.stream;
    RiOsrFrame frame = compilation.getOsrFrame();
    s.setBCI(osrBCI);
    s.next(); // XXX: why go to next bytecode?

    // create a new block to contain the OSR setup code
    ir.osrEntryBlock = new BlockBegin(osrBCI, ir.nextBlockNumber());
    ir.osrEntryBlock.setOsrEntry(true);
    ir.osrEntryBlock.setDepthFirstNumber(0);

    // get the target block of the OSR
    BlockBegin target = scopeData.blockAt(osrBCI);
    assert target != null && target.isOsrEntry();

    MutableFrameState state = target.stateBefore().copy();
    ir.osrEntryBlock.setStateBefore(state);

    // point the parser at the new entry block
    killMemoryMap();
    curBlock = ir.osrEntryBlock;
    curState = state.copy();
    lastInstr = ir.osrEntryBlock;

    // create the entry instruction which represents the OSR state buffer
    // input from interpreter / JIT
    Instruction e = new OsrEntry();
    e.setFlag(Value.Flag.NonNull, true);

    // reload every live local from the OSR buffer into the entry state
    for (int i = 0; i < state.localsSize(); i++) {
        Value local = state.localAt(i);
        Value get;
        int offset = frame.getLocalOffset(i);
        if (local != null) {
            // this is a live local according to compiler
            if (local.kind.isObject() && !frame.isLiveObject(i)) {
                // the compiler thinks this is live, but not the interpreter
                // pretend that it passed null
                get = appendConstant(CiConstant.NULL_OBJECT);
            } else {
                // raw load from the buffer at the interpreter frame offset
                Value oc = appendConstant(CiConstant.forInt(offset));
                get = append(new UnsafeGetRaw(local.kind, e, oc, 0, true));
            }
            state.storeLocal(i, get);
        }
    }

    assert state.callerState() == null;
    state.clearLocals();
    // ATTN: assumption: state is not used further below, else add .immutableCopy()
    Goto g = new Goto(target, state, false);
    append(g);
    ir.osrEntryBlock.setEnd(g);
    // merge the OSR entry state into the real target block
    target.mergeOrClone(ir.osrEntryBlock.end().stateAfter());
}
2138 | |
/**
 * Parses bytecodes starting at {@code bci} until the current block ends — either
 * because an instruction produced a {@link BlockEnd}, or because parsing fell
 * through into the start of another block (in which case a Goto is synthesized).
 * Afterwards the end state is merged into all successors, which are queued on the
 * scope's work list.
 *
 * @param inliningIntoCurrentBlock true when called from inline(): the callee's
 *        entry-block boundary at bci 0 is ignored unless it is a loop header
 * @return the block's terminating instruction
 */
private BlockEnd iterateBytecodesForBlock(int bci, boolean inliningIntoCurrentBlock) {
    skipBlock = false;
    assert curState != null;
    BytecodeStream s = scopeData.stream;
    s.setBCI(bci);

    BlockBegin block = curBlock;
    BlockEnd end = null;
    // an exception entry whose body has not been emitted yet needs the
    // exception object pushed before its first real bytecode
    boolean pushException = block.isExceptionEntry() && block.next() == null;
    int prevBCI = bci;
    int endBCI = s.endBCI();
    boolean blockStart = true;

    while (bci < endBCI) {
        BlockBegin nextBlock = blockAtOrNull(bci);
        if (bci == 0 && inliningIntoCurrentBlock) {
            if (!nextBlock.isParserLoopHeader()) {
                // Ignore the block boundary of the entry block of a method
                // being inlined unless the block is a loop header.
                nextBlock = null;
                blockStart = false;
            }
        }
        if (nextBlock != null && nextBlock != block) {
            // we fell through to the next block, add a goto and break
            end = new Goto(nextBlock, null, false);
            lastInstr = lastInstr.setNext(end, prevBCI);
            break;
        }
        // read the opcode
        int opcode = s.currentBC();

        // check for active JSR during OSR compilation
        if (compilation.isOsrCompilation() && scope().isTopScope() && scopeData.parsingJsr() && s.currentBCI() == compilation.osrBCI) {
            throw new CiBailout("OSR not supported while a JSR is active");
        }

        // push an exception object onto the stack if we are parsing an exception handler
        if (pushException) {
            FrameState stateBefore = curState.immutableCopy(bci());
            apush(append(new ExceptionObject(stateBefore)));
            pushException = false;
        }

        traceState();
        traceInstruction(bci, s, opcode, blockStart);
        processBytecode(bci, s, opcode);

        prevBCI = bci;

        if (lastInstr instanceof BlockEnd) {
            // the bytecode ended the block (return, throw, branch, ...)
            end = (BlockEnd) lastInstr;
            break;
        }
        s.next();
        bci = s.currentBCI();
        blockStart = false;
    }

    // stop processing of this block
    // (skipBlock is set by inline() when the continuation must not be filled here)
    if (skipBlock) {
        skipBlock = false;
        return (BlockEnd) lastInstr;
    }

    // if the method terminates, we don't need the stack anymore
    if (end instanceof Return || end instanceof Throw) {
        curState.clearStack();
    }

    // connect to begin and set state
    // NOTE that inlining may have changed the block we are parsing
    assert end != null : "end should exist after iterating over bytecodes";
    end.setStateAfter(curState.immutableCopy(bci()));
    curBlock.setEnd(end);
    // propagate the state
    for (BlockBegin succ : end.successors()) {
        assert succ.predecessors().contains(curBlock);
        succ.mergeOrClone(end.stateAfter());
        scopeData.addToWorkList(succ);
    }
    return end;
}
2222 | |
2223 private void traceState() { | |
2224 if (C1XOptions.TraceBytecodeParserLevel >= TRACELEVEL_STATE && !TTY.isSuppressed()) { | |
2225 log.println(String.format("| state [nr locals = %d, stack depth = %d, method = %s]", curState.localsSize(), curState.stackSize(), curState.scope().method)); | |
2226 for (int i = 0; i < curState.localsSize(); ++i) { | |
2227 Value value = curState.localAt(i); | |
2228 log.println(String.format("| local[%d] = %-8s : %s", i, value == null ? "bogus" : value.kind.javaName, value)); | |
2229 } | |
2230 for (int i = 0; i < curState.stackSize(); ++i) { | |
2231 Value value = curState.stackAt(i); | |
2232 log.println(String.format("| stack[%d] = %-8s : %s", i, value == null ? "bogus" : value.kind.javaName, value)); | |
2233 } | |
2234 for (int i = 0; i < curState.locksSize(); ++i) { | |
2235 Value value = curState.lockAt(i); | |
2236 log.println(String.format("| lock[%d] = %-8s : %s", i, value == null ? "bogus" : value.kind.javaName, value)); | |
2237 } | |
2238 } | |
2239 } | |
2240 | |
2241 private void processBytecode(int bci, BytecodeStream s, int opcode) { | |
2242 int cpi; | |
2243 | |
2244 // Checkstyle: stop | |
2245 switch (opcode) { | |
2246 case NOP : /* nothing to do */ break; | |
2247 case ACONST_NULL : apush(appendConstant(CiConstant.NULL_OBJECT)); break; | |
2248 case ICONST_M1 : ipush(appendConstant(CiConstant.INT_MINUS_1)); break; | |
2249 case ICONST_0 : ipush(appendConstant(CiConstant.INT_0)); break; | |
2250 case ICONST_1 : ipush(appendConstant(CiConstant.INT_1)); break; | |
2251 case ICONST_2 : ipush(appendConstant(CiConstant.INT_2)); break; | |
2252 case ICONST_3 : ipush(appendConstant(CiConstant.INT_3)); break; | |
2253 case ICONST_4 : ipush(appendConstant(CiConstant.INT_4)); break; | |
2254 case ICONST_5 : ipush(appendConstant(CiConstant.INT_5)); break; | |
2255 case LCONST_0 : lpush(appendConstant(CiConstant.LONG_0)); break; | |
2256 case LCONST_1 : lpush(appendConstant(CiConstant.LONG_1)); break; | |
2257 case FCONST_0 : fpush(appendConstant(CiConstant.FLOAT_0)); break; | |
2258 case FCONST_1 : fpush(appendConstant(CiConstant.FLOAT_1)); break; | |
2259 case FCONST_2 : fpush(appendConstant(CiConstant.FLOAT_2)); break; | |
2260 case DCONST_0 : dpush(appendConstant(CiConstant.DOUBLE_0)); break; | |
2261 case DCONST_1 : dpush(appendConstant(CiConstant.DOUBLE_1)); break; | |
2262 case BIPUSH : ipush(appendConstant(CiConstant.forInt(s.readByte()))); break; | |
2263 case SIPUSH : ipush(appendConstant(CiConstant.forInt(s.readShort()))); break; | |
2264 case LDC : // fall through | |
2265 case LDC_W : // fall through | |
2266 case LDC2_W : genLoadConstant(s.readCPI()); break; | |
2267 case ILOAD : loadLocal(s.readLocalIndex(), CiKind.Int); break; | |
2268 case LLOAD : loadLocal(s.readLocalIndex(), CiKind.Long); break; | |
2269 case FLOAD : loadLocal(s.readLocalIndex(), CiKind.Float); break; | |
2270 case DLOAD : loadLocal(s.readLocalIndex(), CiKind.Double); break; | |
2271 case ALOAD : loadLocal(s.readLocalIndex(), CiKind.Object); break; | |
2272 case ILOAD_0 : // fall through | |
2273 case ILOAD_1 : // fall through | |
2274 case ILOAD_2 : // fall through | |
2275 case ILOAD_3 : loadLocal(opcode - ILOAD_0, CiKind.Int); break; | |
2276 case LLOAD_0 : // fall through | |
2277 case LLOAD_1 : // fall through | |
2278 case LLOAD_2 : // fall through | |
2279 case LLOAD_3 : loadLocal(opcode - LLOAD_0, CiKind.Long); break; | |
2280 case FLOAD_0 : // fall through | |
2281 case FLOAD_1 : // fall through | |
2282 case FLOAD_2 : // fall through | |
2283 case FLOAD_3 : loadLocal(opcode - FLOAD_0, CiKind.Float); break; | |
2284 case DLOAD_0 : // fall through | |
2285 case DLOAD_1 : // fall through | |
2286 case DLOAD_2 : // fall through | |
2287 case DLOAD_3 : loadLocal(opcode - DLOAD_0, CiKind.Double); break; | |
2288 case ALOAD_0 : // fall through | |
2289 case ALOAD_1 : // fall through | |
2290 case ALOAD_2 : // fall through | |
2291 case ALOAD_3 : loadLocal(opcode - ALOAD_0, CiKind.Object); break; | |
2292 case IALOAD : genLoadIndexed(CiKind.Int ); break; | |
2293 case LALOAD : genLoadIndexed(CiKind.Long ); break; | |
2294 case FALOAD : genLoadIndexed(CiKind.Float ); break; | |
2295 case DALOAD : genLoadIndexed(CiKind.Double); break; | |
2296 case AALOAD : genLoadIndexed(CiKind.Object); break; | |
2297 case BALOAD : genLoadIndexed(CiKind.Byte ); break; | |
2298 case CALOAD : genLoadIndexed(CiKind.Char ); break; | |
2299 case SALOAD : genLoadIndexed(CiKind.Short ); break; | |
2300 case ISTORE : storeLocal(CiKind.Int, s.readLocalIndex()); break; | |
2301 case LSTORE : storeLocal(CiKind.Long, s.readLocalIndex()); break; | |
2302 case FSTORE : storeLocal(CiKind.Float, s.readLocalIndex()); break; | |
2303 case DSTORE : storeLocal(CiKind.Double, s.readLocalIndex()); break; | |
2304 case ASTORE : storeLocal(CiKind.Object, s.readLocalIndex()); break; | |
2305 case ISTORE_0 : // fall through | |
2306 case ISTORE_1 : // fall through | |
2307 case ISTORE_2 : // fall through | |
2308 case ISTORE_3 : storeLocal(CiKind.Int, opcode - ISTORE_0); break; | |
2309 case LSTORE_0 : // fall through | |
2310 case LSTORE_1 : // fall through | |
2311 case LSTORE_2 : // fall through | |
2312 case LSTORE_3 : storeLocal(CiKind.Long, opcode - LSTORE_0); break; | |
2313 case FSTORE_0 : // fall through | |
2314 case FSTORE_1 : // fall through | |
2315 case FSTORE_2 : // fall through | |
2316 case FSTORE_3 : storeLocal(CiKind.Float, opcode - FSTORE_0); break; | |
2317 case DSTORE_0 : // fall through | |
2318 case DSTORE_1 : // fall through | |
2319 case DSTORE_2 : // fall through | |
2320 case DSTORE_3 : storeLocal(CiKind.Double, opcode - DSTORE_0); break; | |
2321 case ASTORE_0 : // fall through | |
2322 case ASTORE_1 : // fall through | |
2323 case ASTORE_2 : // fall through | |
2324 case ASTORE_3 : storeLocal(CiKind.Object, opcode - ASTORE_0); break; | |
2325 case IASTORE : genStoreIndexed(CiKind.Int ); break; | |
2326 case LASTORE : genStoreIndexed(CiKind.Long ); break; | |
2327 case FASTORE : genStoreIndexed(CiKind.Float ); break; | |
2328 case DASTORE : genStoreIndexed(CiKind.Double); break; | |
2329 case AASTORE : genStoreIndexed(CiKind.Object); break; | |
2330 case BASTORE : genStoreIndexed(CiKind.Byte ); break; | |
2331 case CASTORE : genStoreIndexed(CiKind.Char ); break; | |
2332 case SASTORE : genStoreIndexed(CiKind.Short ); break; | |
2333 case POP : // fall through | |
2334 case POP2 : // fall through | |
2335 case DUP : // fall through | |
2336 case DUP_X1 : // fall through | |
2337 case DUP_X2 : // fall through | |
2338 case DUP2 : // fall through | |
2339 case DUP2_X1 : // fall through | |
2340 case DUP2_X2 : // fall through | |
2341 case SWAP : stackOp(opcode); break; | |
2342 case IADD : // fall through | |
2343 case ISUB : // fall through | |
2344 case IMUL : genArithmeticOp(CiKind.Int, opcode); break; | |
2345 case IDIV : // fall through | |
2346 case IREM : genArithmeticOp(CiKind.Int, opcode, curState.immutableCopy(bci())); break; | |
2347 case LADD : // fall through | |
2348 case LSUB : // fall through | |
2349 case LMUL : genArithmeticOp(CiKind.Long, opcode); break; | |
2350 case LDIV : // fall through | |
2351 case LREM : genArithmeticOp(CiKind.Long, opcode, curState.immutableCopy(bci())); break; | |
2352 case FADD : // fall through | |
2353 case FSUB : // fall through | |
2354 case FMUL : // fall through | |
2355 case FDIV : // fall through | |
2356 case FREM : genArithmeticOp(CiKind.Float, opcode); break; | |
2357 case DADD : // fall through | |
2358 case DSUB : // fall through | |
2359 case DMUL : // fall through | |
2360 case DDIV : // fall through | |
2361 case DREM : genArithmeticOp(CiKind.Double, opcode); break; | |
2362 case INEG : genNegateOp(CiKind.Int); break; | |
2363 case LNEG : genNegateOp(CiKind.Long); break; | |
2364 case FNEG : genNegateOp(CiKind.Float); break; | |
2365 case DNEG : genNegateOp(CiKind.Double); break; | |
2366 case ISHL : // fall through | |
2367 case ISHR : // fall through | |
2368 case IUSHR : genShiftOp(CiKind.Int, opcode); break; | |
2369 case IAND : // fall through | |
2370 case IOR : // fall through | |
2371 case IXOR : genLogicOp(CiKind.Int, opcode); break; | |
2372 case LSHL : // fall through | |
2373 case LSHR : // fall through | |
2374 case LUSHR : genShiftOp(CiKind.Long, opcode); break; | |
2375 case LAND : // fall through | |
2376 case LOR : // fall through | |
2377 case LXOR : genLogicOp(CiKind.Long, opcode); break; | |
2378 case IINC : genIncrement(); break; | |
2379 case I2L : genConvert(opcode, CiKind.Int , CiKind.Long ); break; | |
2380 case I2F : genConvert(opcode, CiKind.Int , CiKind.Float ); break; | |
2381 case I2D : genConvert(opcode, CiKind.Int , CiKind.Double); break; | |
2382 case L2I : genConvert(opcode, CiKind.Long , CiKind.Int ); break; | |
2383 case L2F : genConvert(opcode, CiKind.Long , CiKind.Float ); break; | |
2384 case L2D : genConvert(opcode, CiKind.Long , CiKind.Double); break; | |
2385 case F2I : genConvert(opcode, CiKind.Float , CiKind.Int ); break; | |
2386 case F2L : genConvert(opcode, CiKind.Float , CiKind.Long ); break; | |
2387 case F2D : genConvert(opcode, CiKind.Float , CiKind.Double); break; | |
2388 case D2I : genConvert(opcode, CiKind.Double, CiKind.Int ); break; | |
2389 case D2L : genConvert(opcode, CiKind.Double, CiKind.Long ); break; | |
2390 case D2F : genConvert(opcode, CiKind.Double, CiKind.Float ); break; | |
2391 case I2B : genConvert(opcode, CiKind.Int , CiKind.Byte ); break; | |
2392 case I2C : genConvert(opcode, CiKind.Int , CiKind.Char ); break; | |
2393 case I2S : genConvert(opcode, CiKind.Int , CiKind.Short ); break; | |
2394 case LCMP : genCompareOp(CiKind.Long, opcode, CiKind.Int); break; | |
2395 case FCMPL : genCompareOp(CiKind.Float, opcode, CiKind.Int); break; | |
2396 case FCMPG : genCompareOp(CiKind.Float, opcode, CiKind.Int); break; | |
2397 case DCMPL : genCompareOp(CiKind.Double, opcode, CiKind.Int); break; | |
2398 case DCMPG : genCompareOp(CiKind.Double, opcode, CiKind.Int); break; | |
2399 case IFEQ : genIfZero(Condition.EQ); break; | |
2400 case IFNE : genIfZero(Condition.NE); break; | |
2401 case IFLT : genIfZero(Condition.LT); break; | |
2402 case IFGE : genIfZero(Condition.GE); break; | |
2403 case IFGT : genIfZero(Condition.GT); break; | |
2404 case IFLE : genIfZero(Condition.LE); break; | |
2405 case IF_ICMPEQ : genIfSame(CiKind.Int, Condition.EQ); break; | |
2406 case IF_ICMPNE : genIfSame(CiKind.Int, Condition.NE); break; | |
2407 case IF_ICMPLT : genIfSame(CiKind.Int, Condition.LT); break; | |
2408 case IF_ICMPGE : genIfSame(CiKind.Int, Condition.GE); break; | |
2409 case IF_ICMPGT : genIfSame(CiKind.Int, Condition.GT); break; | |
2410 case IF_ICMPLE : genIfSame(CiKind.Int, Condition.LE); break; | |
2411 case IF_ACMPEQ : genIfSame(peekKind(), Condition.EQ); break; | |
2412 case IF_ACMPNE : genIfSame(peekKind(), Condition.NE); break; | |
2413 case GOTO : genGoto(s.currentBCI(), s.readBranchDest()); break; | |
2414 case JSR : genJsr(s.readBranchDest()); break; | |
2415 case RET : genRet(s.readLocalIndex()); break; | |
2416 case TABLESWITCH : genTableswitch(); break; | |
2417 case LOOKUPSWITCH : genLookupswitch(); break; | |
2418 case IRETURN : genReturn(ipop()); break; | |
2419 case LRETURN : genReturn(lpop()); break; | |
2420 case FRETURN : genReturn(fpop()); break; | |
2421 case DRETURN : genReturn(dpop()); break; | |
2422 case ARETURN : genReturn(apop()); break; | |
2423 case RETURN : genReturn(null ); break; | |
2424 case GETSTATIC : cpi = s.readCPI(); genGetStatic(cpi, constantPool().lookupField(cpi, opcode)); break; | |
2425 case PUTSTATIC : cpi = s.readCPI(); genPutStatic(cpi, constantPool().lookupField(cpi, opcode)); break; | |
2426 case GETFIELD : cpi = s.readCPI(); genGetField(cpi, constantPool().lookupField(cpi, opcode)); break; | |
2427 case PUTFIELD : cpi = s.readCPI(); genPutField(cpi, constantPool().lookupField(cpi, opcode)); break; | |
2428 case INVOKEVIRTUAL : cpi = s.readCPI(); genInvokeVirtual(constantPool().lookupMethod(cpi, opcode), cpi, constantPool()); break; | |
2429 case INVOKESPECIAL : cpi = s.readCPI(); genInvokeSpecial(constantPool().lookupMethod(cpi, opcode), null, cpi, constantPool()); break; | |
2430 case INVOKESTATIC : cpi = s.readCPI(); genInvokeStatic(constantPool().lookupMethod(cpi, opcode), cpi, constantPool()); break; | |
2431 case INVOKEINTERFACE: cpi = s.readCPI(); genInvokeInterface(constantPool().lookupMethod(cpi, opcode), cpi, constantPool()); break; | |
2432 case NEW : genNewInstance(s.readCPI()); break; | |
2433 case NEWARRAY : genNewTypeArray(s.readLocalIndex()); break; | |
2434 case ANEWARRAY : genNewObjectArray(s.readCPI()); break; | |
2435 case ARRAYLENGTH : genArrayLength(); break; | |
2436 case ATHROW : genThrow(s.currentBCI()); break; | |
2437 case CHECKCAST : genCheckCast(); break; | |
2438 case INSTANCEOF : genInstanceOf(); break; | |
2439 case MONITORENTER : genMonitorEnter(apop(), s.currentBCI()); break; | |
2440 case MONITOREXIT : genMonitorExit(apop(), s.currentBCI()); break; | |
2441 case MULTIANEWARRAY : genNewMultiArray(s.readCPI()); break; | |
2442 case IFNULL : genIfNull(Condition.EQ); break; | |
2443 case IFNONNULL : genIfNull(Condition.NE); break; | |
2444 case GOTO_W : genGoto(s.currentBCI(), s.readFarBranchDest()); break; | |
2445 case JSR_W : genJsr(s.readFarBranchDest()); break; | |
2446 default: | |
2447 processExtendedBytecode(bci, s, opcode); | |
2448 } | |
2449 // Checkstyle: resume | |
2450 } | |
2451 | |
    /**
     * Dispatches the extended (non-standard JVM) bytecodes used by this VM: word-typed
     * locals and arithmetic, raw pointer access, register access, native-call support and
     * other intrinsics. Invoked by the main bytecode loop for opcodes its own switch does
     * not handle.
     *
     * @param bci the bytecode index of {@code opcode} (used in error reporting)
     * @param s the stream positioned at {@code opcode}, used to read operand bytes
     * @param opcode the extended opcode to process
     * @throws CiBailout for BREAKPOINT (set concurrently by a debugger) or any opcode
     *         not handled below
     */
    private void processExtendedBytecode(int bci, BytecodeStream s, int opcode) {
        // Checkstyle: stop
        switch (opcode) {
            case UNSAFE_CAST    : genUnsafeCast(constantPool().lookupMethod(s.readCPI(), (byte)Bytecodes.UNSAFE_CAST)); break;
            case WLOAD          : loadLocal(s.readLocalIndex(), CiKind.Word); break;
            case WLOAD_0        : loadLocal(0, CiKind.Word); break;
            case WLOAD_1        : loadLocal(1, CiKind.Word); break;
            case WLOAD_2        : loadLocal(2, CiKind.Word); break;
            case WLOAD_3        : loadLocal(3, CiKind.Word); break;

            case WSTORE         : storeLocal(CiKind.Word, s.readLocalIndex()); break;
            case WSTORE_0       : // fall through
            case WSTORE_1       : // fall through
            case WSTORE_2       : // fall through
            case WSTORE_3       : storeLocal(CiKind.Word, opcode - WSTORE_0); break;

            case WCONST_0       : wpush(appendConstant(CiConstant.ZERO)); break;
            case WDIV           : // fall through
            case WREM           : genArithmeticOp(CiKind.Word, opcode, curState.immutableCopy(bci())); break;
            case WDIVI          : genArithmeticOp(CiKind.Word, opcode, CiKind.Word, CiKind.Int, curState.immutableCopy(bci())); break;
            case WREMI          : genArithmeticOp(CiKind.Int, opcode, CiKind.Word, CiKind.Int, curState.immutableCopy(bci())); break;

            case READREG        : genLoadRegister(s.readCPI()); break;
            case WRITEREG       : genStoreRegister(s.readCPI()); break;
            case INCREG         : genIncRegister(s.readCPI()); break;

            // Pointer ops encode their variant in the CPI byte; pack it into the upper
            // bits so the gen* helpers receive the full variant opcode.
            case PREAD          : genLoadPointer(PREAD | (s.readCPI() << 8)); break;
            case PGET           : genLoadPointer(PGET | (s.readCPI() << 8)); break;
            case PWRITE         : genStorePointer(PWRITE | (s.readCPI() << 8)); break;
            case PSET           : genStorePointer(PSET | (s.readCPI() << 8)); break;
            case PCMPSWP        : genCompareAndSwap(PCMPSWP | (s.readCPI() << 8)); break;
            case MEMBAR         : genMemoryBarrier(s.readCPI()); break;

            case WRETURN        : genReturn(wpop()); break;
            case INFOPOINT      : genInfopoint(INFOPOINT | (s.readUByte(bci() + 1) << 16), s.readUByte(bci() + 2) != 0); break;
            case JNICALL        : genNativeCall(s.readCPI()); break;
            case JNIOP          : genJniOp(s.readCPI()); break;
            case ALLOCA         : genStackAllocate(); break;

            case MOV_I2F        : genConvert(opcode, CiKind.Int, CiKind.Float ); break;
            case MOV_F2I        : genConvert(opcode, CiKind.Float, CiKind.Int ); break;
            case MOV_L2D        : genConvert(opcode, CiKind.Long, CiKind.Double ); break;
            case MOV_D2L        : genConvert(opcode, CiKind.Double, CiKind.Long ); break;

            case UCMP           : genUnsignedCompareOp(CiKind.Int, opcode, s.readCPI()); break;
            case UWCMP          : genUnsignedCompareOp(CiKind.Word, opcode, s.readCPI()); break;

            case STACKHANDLE    : genStackHandle(s.readCPI() == 0); break;
            case BREAKPOINT_TRAP: genBreakpointTrap(); break;
            case PAUSE          : genPause(); break;
            case LSB            : // fall through
            case MSB            : genSignificantBit(opcode);break;

            case TEMPLATE_CALL  : genTemplateCall(constantPool().lookupMethod(s.readCPI(), (byte)Bytecodes.TEMPLATE_CALL)); break;
            case ICMP           : genCompareOp(CiKind.Int, opcode, CiKind.Void); break;
            case WCMP           : genCompareOp(CiKind.Word, opcode, CiKind.Void); break;

            case BREAKPOINT:
                throw new CiBailout("concurrent setting of breakpoint");
            default:
                throw new CiBailout("Unsupported opcode " + opcode + " (" + nameOf(opcode) + ") [bci=" + bci + "]");
        }
        // Checkstyle: resume
    }
2516 | |
    /**
     * Logs a single parsed instruction when bytecode tracing is enabled: a '+' (block
     * start) or '|' marker, the right-aligned bci, the opcode mnemonic and the raw
     * operand bytes up to the next bci.
     */
    private void traceInstruction(int bci, BytecodeStream s, int opcode, boolean blockStart) {
        if (C1XOptions.TraceBytecodeParserLevel >= TRACELEVEL_INSTRUCTIONS && !TTY.isSuppressed()) {
            StringBuilder sb = new StringBuilder(40);
            sb.append(blockStart ? '+' : '|');
            // Pad so bcis up to 999 align in a 3-character column.
            if (bci < 10) {
                sb.append("  ");
            } else if (bci < 100) {
                sb.append(' ');
            }
            sb.append(bci).append(": ").append(Bytecodes.nameOf(opcode));
            // Dump the operand bytes following the opcode.
            for (int i = bci + 1; i < s.nextBCI(); ++i) {
                sb.append(' ').append(s.readUByte(i));
            }
            log.println(sb.toString());
        }
    }
2533 | |
    /**
     * Processes a PAUSE bytecode by appending a {@link Pause} instruction.
     */
    private void genPause() {
        append(new Pause());
    }
2537 | |
    /**
     * Processes a BREAKPOINT_TRAP bytecode by appending a {@link BreakpointTrap}
     * instruction.
     */
    private void genBreakpointTrap() {
        append(new BreakpointTrap());
    }
2541 | |
    /**
     * Processes a STACKHANDLE bytecode: pops a value and pushes a word-sized handle
     * to a stack location holding it.
     *
     * @param isCategory1 specifies if the value occupies a single stack slot.
     *        NOTE(review): currently unused — {@code xpop()} is called regardless;
     *        confirm category-2 values are handled correctly.
     */
    private void genStackHandle(boolean isCategory1) {
        Value value = curState.xpop();
        wpush(append(new StackHandle(value)));
    }
2546 | |
2547 private void genStackAllocate() { | |
2548 Value size = pop(CiKind.Int); | |
2549 wpush(append(new StackAllocate(size))); | |
2550 } | |
2551 | |
2552 private void genSignificantBit(int opcode) { | |
2553 Value value = pop(CiKind.Word); | |
2554 push(CiKind.Int, append(new SignificantBitOp(value, opcode))); | |
2555 } | |
2556 | |
    /**
     * Appends a call to a runtime snippet. Snippet-call argument slots left as
     * {@code null} are popped from the operand stack; pre-bound arguments are appended
     * as constants. The invoke may be elided entirely ({@code tryRemoveCall}) or
     * inlined before an actual invoke instruction is emitted.
     */
    private void appendSnippetCall(RiSnippetCall snippetCall) {
        Value[] args = new Value[snippetCall.arguments.length];
        RiMethod snippet = snippetCall.snippet;
        RiSignature signature = snippet.signature();
        assert signature.argumentCount(!isStatic(snippet.accessFlags())) == args.length;
        // Iterate right-to-left so stack pops yield arguments in declaration order.
        for (int i = args.length - 1; i >= 0; --i) {
            CiKind argKind = signature.argumentKindAt(i);
            if (snippetCall.arguments[i] == null) {
                args[i] = pop(argKind);
            } else {
                args[i] = append(new Constant(snippetCall.arguments[i]));
            }
        }

        if (!tryRemoveCall(snippet, args, true)) {
            if (!tryInline(snippet, args)) {
                appendInvoke(snippetCall.opcode, snippet, args, true, (char) 0, constantPool());
            }
        }
    }
2577 | |
    /**
     * Processes a JNIOP bytecode, emitting the runtime snippet implementing one of the
     * JNI transition operations for the current (native) method.
     *
     * @param operand selects the operation: {@code JniOp.LINK} (resolve the native
     *        function address), {@code JniOp.J2N} (Java-to-native transition) or
     *        {@code JniOp.N2J} (native-to-Java transition)
     */
    private void genJniOp(int operand) {
        RiSnippets snippets = compilation.runtime.getSnippets();
        // NOTE(review): an unrecognized operand falls through silently (no default
        // case) — confirm this is intentional.
        switch (operand) {
            case JniOp.LINK: {
                RiMethod nativeMethod = scope().method;
                RiSnippetCall linkSnippet = snippets.link(nativeMethod);
                if (linkSnippet.result != null) {
                    // The link result is already known: push it as a constant.
                    wpush(appendConstant(linkSnippet.result));
                } else {
                    appendSnippetCall(linkSnippet);
                }
                break;
            }
            case JniOp.J2N: {
                RiMethod nativeMethod = scope().method;
                appendSnippetCall(snippets.enterNative(nativeMethod));
                break;
            }
            case JniOp.N2J: {
                RiMethod nativeMethod = scope().method;
                appendSnippetCall(snippets.enterVM(nativeMethod));
                break;
            }
        }
    }
2603 | |
2604 private void genNativeCall(int cpi) { | |
2605 Value nativeFunctionAddress = wpop(); | |
2606 RiSignature sig = constantPool().lookupSignature(cpi); | |
2607 Value[] args = curState.popArguments(sig.argumentSlots(false)); | |
2608 | |
2609 RiMethod nativeMethod = scope().method; | |
2610 CiKind returnKind = sig.returnKind(); | |
2611 pushReturn(returnKind, append(new NativeCall(nativeMethod, sig, nativeFunctionAddress, args, null))); | |
2612 | |
2613 // Sign extend or zero the upper bits of a return value smaller than an int to | |
2614 // preserve the invariant that all such values are represented by an int | |
2615 // in the VM. We cannot rely on the native C compiler doing this for us. | |
2616 switch (sig.returnKind()) { | |
2617 case Boolean: | |
2618 case Byte: { | |
2619 genConvert(I2B, CiKind.Int, CiKind.Byte); | |
2620 break; | |
2621 } | |
2622 case Short: { | |
2623 genConvert(I2S, CiKind.Int, CiKind.Short); | |
2624 break; | |
2625 } | |
2626 case Char: { | |
2627 genConvert(I2C, CiKind.Int, CiKind.Char); | |
2628 break; | |
2629 } | |
2630 } | |
2631 } | |
2632 | |
2633 void genTemplateCall(RiMethod method) { | |
2634 RiSignature sig = method.signature(); | |
2635 Value[] args = curState.popArguments(sig.argumentSlots(false)); | |
2636 assert args.length <= 2; | |
2637 CiKind returnKind = sig.returnKind(); | |
2638 Value address = null; | |
2639 Value receiver = null; | |
2640 if (args.length == 1) { | |
2641 address = args[0]; | |
2642 assert address.kind.isWord(); | |
2643 } else if (args.length == 2) { | |
2644 address = args[0]; | |
2645 assert address.kind.isWord(); | |
2646 receiver = args[1]; | |
2647 assert receiver.kind.isObject(); | |
2648 } | |
2649 pushReturn(returnKind, append(new TemplateCall(returnKind, address, receiver))); | |
2650 } | |
2651 | |
    /**
     * Processes an INFOPOINT bytecode variant, recording an immutable frame state at
     * the current bci and pushing the infopoint's value if it produces one.
     *
     * @param opcode the INFOPOINT opcode with its variant packed into the upper bits
     * @param inclFrame whether the full frame state must be included
     */
    private void genInfopoint(int opcode, boolean inclFrame) {
        // TODO: create slimmer frame state if inclFrame is false
        FrameState state = curState.immutableCopy(bci());
        assert opcode != SAFEPOINT || !scopeData.noSafepoints() : "cannot place explicit safepoint in uninterruptible code scope";
        Value result = append(new Infopoint(opcode, state));
        // Some infopoint variants produce a value; push it if so.
        if (!result.kind.isVoid()) {
            push(result.kind, result);
        }
    }
2661 | |
2662 private void genLoadRegister(int registerId) { | |
2663 CiRegister register = compilation.registerConfig.getRegisterForRole(registerId); | |
2664 if (register == null) { | |
2665 throw new CiBailout("Unsupported READREG operand " + registerId); | |
2666 } | |
2667 LoadRegister load = new LoadRegister(CiKind.Word, register); | |
2668 RiRegisterAttributes regAttr = compilation.registerConfig.getAttributesMap()[register.number]; | |
2669 if (regAttr.isNonZero) { | |
2670 load.setFlag(Flag.NonNull); | |
2671 } | |
2672 wpush(append(load)); | |
2673 } | |
2674 | |
2675 private void genStoreRegister(int registerId) { | |
2676 CiRegister register = compilation.registerConfig.getRegisterForRole(registerId); | |
2677 if (register == null) { | |
2678 throw new CiBailout("Unsupported WRITEREG operand " + registerId); | |
2679 } | |
2680 Value value = pop(CiKind.Word); | |
2681 append(new StoreRegister(CiKind.Word, register, value)); | |
2682 } | |
2683 | |
2684 private void genIncRegister(int registerId) { | |
2685 CiRegister register = compilation.registerConfig.getRegisterForRole(registerId); | |
2686 if (register == null) { | |
2687 throw new CiBailout("Unsupported INCREG operand " + registerId); | |
2688 } | |
2689 Value value = pop(CiKind.Int); | |
2690 append(new IncrementRegister(register, value)); | |
2691 } | |
2692 | |
    /**
     * Gets the data kind corresponding to a given pointer operation opcode.
     * The data kind may be more specific than a {@linkplain CiKind#stackKind()}.
     *
     * @param opcode a pointer operation variant (PGET/PSET/PREAD/PWRITE families)
     * @return the kind of value at the address accessed by the pointer operation denoted by {@code opcode}
     * @throws CiBailout if {@code opcode} is not a recognized pointer operation
     */
    private static CiKind dataKindForPointerOp(int opcode) {
        switch (opcode) {
            case PGET_BYTE          :
            case PSET_BYTE          :
            case PREAD_BYTE         :
            case PREAD_BYTE_I       :
            case PWRITE_BYTE        :
            case PWRITE_BYTE_I      : return CiKind.Byte;
            case PGET_CHAR          :
            case PREAD_CHAR         :
            case PREAD_CHAR_I       : return CiKind.Char;
            case PGET_SHORT         :
            case PSET_SHORT         :
            case PREAD_SHORT        :
            case PREAD_SHORT_I      :
            case PWRITE_SHORT       :
            case PWRITE_SHORT_I     : return CiKind.Short;
            case PGET_INT           :
            case PSET_INT           :
            case PREAD_INT          :
            case PREAD_INT_I        :
            case PWRITE_INT         :
            case PWRITE_INT_I       : return CiKind.Int;
            case PGET_FLOAT         :
            case PSET_FLOAT         :
            case PREAD_FLOAT        :
            case PREAD_FLOAT_I      :
            case PWRITE_FLOAT       :
            case PWRITE_FLOAT_I     : return CiKind.Float;
            case PGET_LONG          :
            case PSET_LONG          :
            case PREAD_LONG         :
            case PREAD_LONG_I       :
            case PWRITE_LONG        :
            case PWRITE_LONG_I      : return CiKind.Long;
            case PGET_DOUBLE        :
            case PSET_DOUBLE        :
            case PREAD_DOUBLE       :
            case PREAD_DOUBLE_I     :
            case PWRITE_DOUBLE      :
            case PWRITE_DOUBLE_I    : return CiKind.Double;
            case PGET_WORD          :
            case PSET_WORD          :
            case PREAD_WORD         :
            case PREAD_WORD_I       :
            case PWRITE_WORD        :
            case PWRITE_WORD_I      : return CiKind.Word;
            case PGET_REFERENCE     :
            case PSET_REFERENCE     :
            case PREAD_REFERENCE    :
            case PREAD_REFERENCE_I  :
            case PWRITE_REFERENCE   :
            case PWRITE_REFERENCE_I : return CiKind.Object;
            default:
                throw new CiBailout("Unsupported pointer operation opcode " + opcode + "(" + nameOf(opcode) + ")");
        }
    }
2756 | |
    /**
     * Pops the value producing the scaled-index or the byte offset for a pointer operation.
     * If compiling for a 64-bit platform and the value is an {@link CiKind#Int} parameter,
     * then a conversion is inserted to sign extend the int to a word.
     *
     * This is required as the value is used as a 64-bit value and so the high 32 bits
     * need to be correct.
     *
     * @param isInt specifies if the value is an {@code int}
     */
    private Value popOffsetOrIndexForPointerOp(boolean isInt) {
        if (isInt) {
            Value offsetOrIndex = ipop();
            // Only Local values (incoming parameters) are explicitly widened here.
            // NOTE(review): presumably other int values are already correctly
            // sign-extended by the operations that produced them — confirm.
            if (compilation.target.arch.is64bit() && offsetOrIndex instanceof Local) {
                return append(new Convert(I2L, offsetOrIndex, CiKind.Word));
            }
            return offsetOrIndex;
        }
        return wpop();
    }
2777 | |
    /**
     * Processes a pointer read (PREAD or PGET variant). Pops, from the top of the stack:
     * the offset or scaled index (plus, for PGET, a displacement), then the pointer.
     * Pushes the loaded value.
     *
     * @param opcode the pointer opcode with its variant packed into bits 8 and up
     */
    private void genLoadPointer(int opcode) {
        FrameState stateBefore = curState.immutableCopy(bci());
        CiKind dataKind = dataKindForPointerOp(opcode);
        Value offsetOrIndex;
        Value displacement;
        if ((opcode & 0xff) == PREAD) {
            // PREAD: a single offset; the *_I variants take an int offset.
            offsetOrIndex = popOffsetOrIndexForPointerOp(opcode >= PREAD_BYTE_I && opcode <= PREAD_REFERENCE_I);
            displacement = null;
        } else {
            // PGET: an int index plus an int displacement.
            offsetOrIndex = popOffsetOrIndexForPointerOp(true);
            displacement = ipop();
        }
        Value pointer = wpop();
        push(dataKind.stackKind(), append(new LoadPointer(dataKind, opcode, pointer, displacement, offsetOrIndex, stateBefore, false)));
    }
2793 | |
    /**
     * Processes a pointer write (PWRITE or PSET variant). Pops, from the top of the
     * stack: the value to store, the offset or scaled index (plus, for PSET, a
     * displacement), then the pointer.
     *
     * @param opcode the pointer opcode with its variant packed into bits 8 and up
     */
    private void genStorePointer(int opcode) {
        FrameState stateBefore = curState.immutableCopy(bci());
        CiKind dataKind = dataKindForPointerOp(opcode);
        Value value = pop(dataKind.stackKind());
        Value offsetOrIndex;
        Value displacement;
        if ((opcode & 0xff) == PWRITE) {
            // PWRITE: a single offset; the *_I variants take an int offset.
            offsetOrIndex = popOffsetOrIndexForPointerOp(opcode >= PWRITE_BYTE_I && opcode <= PWRITE_REFERENCE_I);
            displacement = null;
        } else {
            // PSET: an int index plus an int displacement.
            offsetOrIndex = popOffsetOrIndexForPointerOp(true);
            displacement = ipop();
        }
        Value pointer = wpop();
        append(new StorePointer(opcode, dataKind, pointer, displacement, offsetOrIndex, value, stateBefore, false));
    }
2810 | |
    /**
     * Gets the kind of value swapped by a given compare-and-swap opcode.
     *
     * @param opcode a PCMPSWP variant
     * @throws CiBailout if {@code opcode} is not a PCMPSWP variant
     */
    private static CiKind kindForCompareAndSwap(int opcode) {
        switch (opcode) {
            case PCMPSWP_INT        :
            case PCMPSWP_INT_I      : return CiKind.Int;
            case PCMPSWP_WORD       :
            case PCMPSWP_WORD_I     : return CiKind.Word;
            case PCMPSWP_REFERENCE  :
            case PCMPSWP_REFERENCE_I: return CiKind.Object;
            default:
                throw new CiBailout("Unsupported compare-and-swap opcode " + opcode + "(" + nameOf(opcode) + ")");
        }
    }
2823 | |
2824 private void genCompareAndSwap(int opcode) { | |
2825 FrameState stateBefore = curState.immutableCopy(bci()); | |
2826 CiKind kind = kindForCompareAndSwap(opcode); | |
2827 Value newValue = pop(kind); | |
2828 Value expectedValue = pop(kind); | |
2829 Value offset; | |
2830 offset = popOffsetOrIndexForPointerOp(opcode >= PCMPSWP_INT_I && opcode <= PCMPSWP_REFERENCE_I); | |
2831 Value pointer = wpop(); | |
2832 push(kind, append(new CompareAndSwap(opcode, pointer, offset, expectedValue, newValue, stateBefore, false))); | |
2833 } | |
2834 | |
2835 | |
2836 private void genMemoryBarrier(int barriers) { | |
2837 int explicitMemoryBarriers = barriers & ~compilation.target.arch.implicitMemoryBarriers; | |
2838 if (explicitMemoryBarriers != 0) { | |
2839 append(new MemoryBarrier(explicitMemoryBarriers)); | |
2840 } | |
2841 } | |
2842 | |
2843 private void genArrayLength() { | |
2844 FrameState stateBefore = curState.immutableCopy(bci()); | |
2845 ipush(append(new ArrayLength(apop(), stateBefore))); | |
2846 } | |
2847 | |
    /**
     * Invalidates all cached values: clears the local value-numbering map and the
     * memory map, if present. Called when an operation may have arbitrary side effects.
     */
    void killMemoryMap() {
        if (localValueMap != null) {
            localValueMap.killAll();
        }
        if (memoryMap != null) {
            memoryMap.kill();
        }
    }
2856 | |
    /**
     * Determines whether {@code type} may be treated as having no subclasses, either
     * because it is final or via a recorded compile-time assumption.
     *
     * @param type the type to test
     * @return {@code true} if {@code type} can be assumed to be a leaf class
     */
    boolean assumeLeafClass(RiType type) {
        if (type.isResolved()) {
            if (isFinal(type.accessFlags())) {
                return true;
            }

            if (C1XOptions.UseAssumptions) {
                RiType assumed = type.uniqueConcreteSubtype();
                // Only succeed when the type is its own unique concrete subtype;
                // the assumption is recorded with the compilation so it can be checked later.
                if (assumed != null && assumed == type) {
                    if (C1XOptions.PrintAssumptions) {
                        TTY.println("Recording leaf class assumption for " + type.name());
                    }
                    compilation.assumptions.recordConcreteSubtype(type, assumed);
                    return true;
                }
            }
        }
        return false;
    }
2876 | |
2877 RiMethod getAssumedLeafMethod(RiMethod method) { | |
2878 if (method.isResolved()) { | |
2879 if (method.isLeafMethod()) { | |
2880 return method; | |
2881 } | |
2882 | |
2883 if (C1XOptions.UseAssumptions) { | |
2884 RiMethod assumed = method.uniqueConcreteMethod(); | |
2885 if (assumed != null) { | |
2886 if (C1XOptions.PrintAssumptions) { | |
2887 TTY.println("Recording concrete method assumption in context of " + method.holder().name() + ": " + assumed.name()); | |
2888 } | |
2889 compilation.assumptions.recordConcreteMethod(method, assumed); | |
2890 return assumed; | |
2891 } else { | |
2892 if (C1XOptions.PrintAssumptions) { | |
2893 TTY.println("Did not find unique concrete method for " + method); | |
2894 } | |
2895 } | |
2896 } | |
2897 } | |
2898 return null; | |
2899 } | |
2900 | |
2901 private int recursiveInlineLevel(RiMethod target) { | |
2902 int rec = 0; | |
2903 IRScope scope = scope(); | |
2904 while (scope != null) { | |
2905 if (scope.method != target) { | |
2906 break; | |
2907 } | |
2908 scope = scope.caller; | |
2909 rec++; | |
2910 } | |
2911 return rec; | |
2912 } | |
2913 | |
    /**
     * Gets the constant pool of the scope currently being parsed.
     */
    private RiConstantPool constantPool() {
        return scopeData.constantPool;
    }
2917 } |