changeset 7075:cd2c08049dd5

Merge.
author Christian Haeubl <haeubl@ssw.jku.at>
date Tue, 27 Nov 2012 12:12:02 +0100
parents 1361501d6bd5 (diff) 52c88c405d07 (current diff)
children 32408bd16b91
files
	graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompilerImpl.java
	graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java
	graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/CastFromHub.java
	graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/CheckCastSnippets.java
	graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/InstanceOfSnippets.java
	graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/MonitorSnippets.java
	graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/NewObjectSnippets.java
	graal/com.oracle.graal.snippets.test/src/com/oracle/graal/snippets/WordTest.java
	graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/InstanceOfSnippetsTemplates.java
	graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/SnippetInstaller.java
	graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/SnippetTemplate.java
diffstat 75 files changed, 7345 insertions(+), 7485 deletions(-)
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Assumptions.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Assumptions.java	Tue Nov 27 12:12:02 2012 +0100
@@ -167,8 +167,12 @@
      * Array with the assumptions. This field is directly accessed from C++ code in the Graal/HotSpot implementation.
      */
     private Assumption[] list;
+    private boolean useOptimisticAssumptions;
+    private int count;
 
-    private int count;
+    public Assumptions(boolean useOptimisticAssumptions) {
+        this.useOptimisticAssumptions = useOptimisticAssumptions;
+    }
 
     /**
      * Returns whether any assumptions have been registered.
@@ -178,6 +182,10 @@
         return count == 0;
     }
 
+    public boolean useOptimisticAssumptions() {
+        return useOptimisticAssumptions;
+    }
+
     @Override
     public Iterator<Assumption> iterator() {
         return new Iterator<Assumptions.Assumption>() {
@@ -203,9 +211,9 @@
      * @param receiverType the type that is assumed to have no finalizable subclasses
      * @return {@code true} if the assumption was recorded and can be assumed; {@code false} otherwise
      */
-    @SuppressWarnings("static-method")
     public boolean recordNoFinalizableSubclassAssumption(ResolvedJavaType receiverType) {
         // TODO (thomaswue): Record that assumption correctly.
+        assert useOptimisticAssumptions;
         return false;
     }
 
@@ -215,6 +223,7 @@
      * @param subtype the one concrete subtype
      */
     public void recordConcreteSubtype(ResolvedJavaType context, ResolvedJavaType subtype) {
+        assert useOptimisticAssumptions;
         record(new ConcreteSubtype(context, subtype));
     }
 
@@ -227,6 +236,7 @@
      * @param impl the concrete method that is the only possible target for the virtual call
      */
     public void recordConcreteMethod(ResolvedJavaMethod method, ResolvedJavaType context, ResolvedJavaMethod impl) {
+        assert useOptimisticAssumptions;
         record(new ConcreteMethod(method, context, impl));
     }
 
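The hunk above replaces the old convention of passing a null Assumptions object with an explicit useOptimisticAssumptions flag: the recording methods now assert the flag instead of relying on null checks at call sites. A minimal usage sketch, assuming only the names from this diff (Assumptions, GraalOptions.OptAssumptions, context, uniqueSubtype) and mirroring the TypeCheckHints change below:

    // Sketch only; the surrounding driver code is assumed, not shown in this changeset.
    Assumptions assumptions = new Assumptions(GraalOptions.OptAssumptions);
    if (assumptions.useOptimisticAssumptions()) {
        // Safe: recordConcreteSubtype asserts useOptimisticAssumptions internally.
        assumptions.recordConcreteSubtype(context, uniqueSubtype);
    } else {
        // Optimistic recording disabled: callers fall back to pessimistic handling.
    }
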
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/TypeCheckHints.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/TypeCheckHints.java	Tue Nov 27 12:12:02 2012 +0100
@@ -65,7 +65,7 @@
             ResolvedJavaType uniqueSubtype = type == null ? null : type.findUniqueConcreteSubtype();
             if (uniqueSubtype != null) {
                 types = new ResolvedJavaType[] {uniqueSubtype};
-                if (assumptions != null) {
+                if (assumptions.useOptimisticAssumptions()) {
                     assumptions.recordConcreteSubtype(type, uniqueSubtype);
                     exact = true;
                 } else {
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Tue Nov 27 12:12:02 2012 +0100
@@ -118,7 +118,7 @@
     public boolean canStoreConstant(Constant c) {
         // there is no immediate move of 64-bit constants on Intel
         switch (c.getKind()) {
-            case Long:   return Util.isInt(c.asLong());
+            case Long:   return Util.isInt(c.asLong()) && !runtime.needsDataPatch(c);
             case Double: return false;
             case Object: return c.isNull();
             default:     return true;
@@ -128,7 +128,7 @@
     @Override
     public boolean canInlineConstant(Constant c) {
         switch (c.getKind()) {
-            case Long:   return NumUtil.isInt(c.asLong());
+            case Long:   return NumUtil.isInt(c.asLong()) && !runtime.needsDataPatch(c);
             case Object: return c.isNull();
             default:     return true;
         }
@@ -147,8 +147,13 @@
             } else if (asConstant(base).getKind() != Kind.Object) {
                 long newDisplacement = displacement + asConstant(base).asLong();
                 if (NumUtil.isInt(newDisplacement)) {
+                    assert !runtime.needsDataPatch(asConstant(base));
                     displacement = (int) newDisplacement;
                     base = Value.ILLEGAL;
+                } else {
+                    Value newBase = newVariable(Kind.Long);
+                    emitMove(base, newBase);
+                    base = newBase;
                 }
             }
         }
@@ -679,6 +684,7 @@
         int displacement = node.displacement();
         Value index = operand(node.offset());
         if (isConstant(index) && NumUtil.isInt(asConstant(index).asLong() + displacement)) {
+            assert !runtime.needsDataPatch(asConstant(index));
             displacement += (int) asConstant(index).asLong();
             address = new Address(kind, load(operand(node.object())), displacement);
         } else {
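All three hunks above enforce one rule: a constant for which runtime.needsDataPatch(c) returns true must not be folded into an immediate operand or an address displacement, because folding leaves no site for the data patch to be applied after code installation. An annotated restatement of the displacement-folding case, assuming the helpers visible in this diff (asConstant, newVariable, emitMove):

    long newDisplacement = displacement + asConstant(base).asLong();
    if (NumUtil.isInt(newDisplacement)) {
        // Folding is only legal for constants whose bits are final at installation time.
        assert !runtime.needsDataPatch(asConstant(base));
        displacement = (int) newDisplacement;
        base = Value.ILLEGAL;                   // base fully absorbed into the displacement
    } else {
        Value newBase = newVariable(Kind.Long); // sum overflows int: materialize the base
        emitMove(base, newBase);                // in a register instead of failing later
        base = newBase;
    }
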
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/BoxingEliminationTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/BoxingEliminationTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -26,6 +26,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.extended.*;
@@ -123,13 +124,14 @@
                     hints.add(invoke);
                 }
 
-                new InliningPhase(null, runtime(), hints, null, null, phasePlan, OptimisticOptimizations.ALL).apply(graph);
-                new CanonicalizerPhase(null, runtime(), null).apply(graph);
+                Assumptions assumptions = new Assumptions(false);
+                new InliningPhase(null, runtime(), hints, assumptions, null, phasePlan, OptimisticOptimizations.ALL).apply(graph);
+                new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
                 Debug.dump(graph, "Graph");
                 new BoxingEliminationPhase().apply(graph);
                 Debug.dump(graph, "Graph");
                 new ExpandBoxingNodesPhase(pool).apply(graph);
-                new CanonicalizerPhase(null, runtime(), null).apply(graph);
+                new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
                 new DeadCodeEliminationPhase().apply(graph);
                 StructuredGraph referenceGraph = parse(referenceSnippet);
                 assertEquals(referenceGraph, graph);
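The remaining test changes in this changeset are mechanical instances of the same pattern: phases that previously accepted a null assumptions argument now receive an explicit Assumptions(false), i.e., a container in which optimistic recording is disallowed. A representative sketch of the updated boilerplate, with the phase signatures exactly as shown in these diffs:

    // Tests opt out of optimistic assumptions explicitly rather than passing null.
    Assumptions assumptions = new Assumptions(false);
    new InliningPhase(null, runtime(), hints, assumptions, null, phasePlan, OptimisticOptimizations.ALL).apply(graph);
    new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
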
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/CompareCanonicalizerTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/CompareCanonicalizerTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -24,6 +24,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.common.*;
 
@@ -36,10 +37,11 @@
             StructuredGraph graph = parse("canonicalCompare" + i);
             assertEquals(referenceGraph, graph);
         }
-        new CanonicalizerPhase(null, runtime(), null).apply(referenceGraph);
+        Assumptions assumptions = new Assumptions(false);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(referenceGraph);
         for (int i = 1; i < 4; i++) {
             StructuredGraph graph = parse("canonicalCompare" + i);
-            new CanonicalizerPhase(null, runtime(), null).apply(graph);
+            new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
             assertEquals(referenceGraph, graph);
         }
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/CompiledMethodTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/CompiledMethodTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -55,7 +55,7 @@
     public void test1() {
         Method method = getMethod("testMethod");
         final StructuredGraph graph = parse(method);
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
+        new CanonicalizerPhase(null, runtime(), new Assumptions(false)).apply(graph);
         new DeadCodeEliminationPhase().apply(graph);
 
         for (Node node : graph.getNodes()) {
@@ -112,7 +112,7 @@
         ResolvedJavaMethod javaMethod = runtime.lookupJavaMethod(method);
         StructuredGraph graph = new StructuredGraph(javaMethod);
         new GraphBuilderPhase(runtime, GraphBuilderConfiguration.getSnippetDefault(), OptimisticOptimizations.NONE).apply(graph);
-        new CanonicalizerPhase(null, runtime, null).apply(graph);
+        new CanonicalizerPhase(null, runtime, new Assumptions(false)).apply(graph);
         new DeadCodeEliminationPhase().apply(graph);
 
         for (Node node : graph.getNodes()) {
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/DegeneratedLoopsTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/DegeneratedLoopsTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -24,6 +24,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.common.*;
@@ -81,7 +82,7 @@
                 for (Invoke invoke : graph.getInvokes()) {
                     invoke.intrinsify(null);
                 }
-                new CanonicalizerPhase(null, runtime(), null).apply(graph);
+                new CanonicalizerPhase(null, runtime(), new Assumptions(false)).apply(graph);
                 StructuredGraph referenceGraph = parse(REFERENCE_SNIPPET);
                 Debug.dump(referenceGraph, "Graph");
                 assertEquals(referenceGraph, graph);
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/FloatingReadTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/FloatingReadTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -24,6 +24,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
@@ -54,7 +55,7 @@
         Debug.scope("FloatingReadTest", new DebugDumpScope(snippet), new Runnable() {
             public void run() {
                 StructuredGraph graph = parse(snippet);
-                new LoweringPhase(runtime(), null).apply(graph);
+                new LoweringPhase(runtime(), new Assumptions(false)).apply(graph);
                 new FloatingReadPhase().apply(graph);
 
                 ReturnNode returnNode = null;
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/IfBoxingEliminationTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/IfBoxingEliminationTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -26,11 +26,12 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.extended.*;
 import com.oracle.graal.phases.*;
-import com.oracle.graal.phases.PhasePlan.*;
+import com.oracle.graal.phases.PhasePlan.PhasePosition;
 import com.oracle.graal.phases.common.*;
 
 public class IfBoxingEliminationTest extends GraalCompilerTest {
@@ -85,18 +86,20 @@
                 for (Invoke invoke : graph.getInvokes()) {
                     hints.add(invoke);
                 }
-                new InliningPhase(null, runtime(), hints, null, null, phasePlan, OptimisticOptimizations.ALL).apply(graph);
-                new CanonicalizerPhase(null, runtime(), null).apply(graph);
+
+                Assumptions assumptions = new Assumptions(false);
+                new InliningPhase(null, runtime(), hints, assumptions, null, phasePlan, OptimisticOptimizations.ALL).apply(graph);
+                new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
                 new PhiStampPhase().apply(graph);
-                new CanonicalizerPhase(null, runtime(), null).apply(graph);
+                new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
                 Debug.dump(graph, "Graph");
                 new BoxingEliminationPhase().apply(graph);
                 Debug.dump(graph, "Graph");
                 new ExpandBoxingNodesPhase(pool).apply(graph);
-                new CanonicalizerPhase(null, runtime(), null).apply(graph);
+                new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
                 new DeadCodeEliminationPhase().apply(graph);
                 StructuredGraph referenceGraph = parse(REFERENCE_SNIPPET);
-                new CanonicalizerPhase(null, runtime(), null).apply(referenceGraph);
+                new CanonicalizerPhase(null, runtime(), assumptions).apply(referenceGraph);
                 new DeadCodeEliminationPhase().apply(referenceGraph);
 
                 assertEquals(referenceGraph, graph);
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/IfCanonicalizerTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/IfCanonicalizerTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -26,6 +26,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
@@ -143,7 +144,7 @@
             n.replaceFirstInput(local, constant);
         }
         Debug.dump(graph, "Graph");
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
+        new CanonicalizerPhase(null, runtime(), new Assumptions(false)).apply(graph);
         StructuredGraph referenceGraph = parse(REFERENCE_SNIPPET);
         assertEquals(referenceGraph, graph);
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/InvokeExceptionTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/InvokeExceptionTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -26,6 +26,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
@@ -63,8 +64,9 @@
         for (Invoke invoke : graph.getInvokes()) {
             hints.add(invoke);
         }
-        new InliningPhase(null, runtime(), hints, null, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
+        Assumptions assumptions = new Assumptions(false);
+        new InliningPhase(null, runtime(), hints, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
         new DeadCodeEliminationPhase().apply(graph);
     }
 }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/InvokeHintsTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/InvokeHintsTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -26,6 +26,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
@@ -73,8 +74,10 @@
         for (Invoke invoke : graph.getInvokes()) {
             hints.add(invoke);
         }
-        new InliningPhase(null, runtime(), hints, null, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
+
+        Assumptions assumptions = new Assumptions(false);
+        new InliningPhase(null, runtime(), hints, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
         new DeadCodeEliminationPhase().apply(graph);
         StructuredGraph referenceGraph = parse(REFERENCE_SNIPPET);
         assertEquals(referenceGraph, graph);
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/LoopUnswitchTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/LoopUnswitchTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -24,6 +24,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.loop.phases.*;
@@ -79,8 +80,9 @@
             ((StateSplit) stateSplit).setStateAfter(null);
         }
 
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
-        new CanonicalizerPhase(null, runtime(), null).apply(referenceGraph);
+        Assumptions assumptions = new Assumptions(false);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(referenceGraph);
         Debug.scope("Test", new DebugDumpScope("Test:" + snippet), new Runnable() {
             @Override
             public void run() {
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MonitorGraphTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MonitorGraphTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -28,6 +28,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.graph.iterators.*;
 import com.oracle.graal.nodes.*;
@@ -91,8 +92,9 @@
         for (Invoke invoke : graph.getInvokes()) {
             hints.add(invoke);
         }
-        new InliningPhase(null, runtime(), hints, null, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
+        Assumptions assumptions = new Assumptions(false);
+        new InliningPhase(null, runtime(), hints, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
         new DeadCodeEliminationPhase().apply(graph);
         return graph;
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ReassociateAndCanonicalTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ReassociateAndCanonicalTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -24,6 +24,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.common.*;
@@ -241,9 +242,10 @@
 
     private <T extends Node & Node.IterableNodeType> void test(String test, String ref) {
         StructuredGraph testGraph = parse(test);
-        new CanonicalizerPhase(null, runtime(), null).apply(testGraph);
+        Assumptions assumptions = new Assumptions(false);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(testGraph);
         StructuredGraph refGraph = parse(ref);
-        new CanonicalizerPhase(null, runtime(), null).apply(refGraph);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(refGraph);
         assertEquals(testGraph, refGraph);
     }
 }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ScalarTypeSystemTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ScalarTypeSystemTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -24,6 +24,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.common.*;
@@ -164,9 +165,10 @@
         StructuredGraph graph = parse(snippet);
         Debug.dump(graph, "Graph");
 //        TypeSystemTest.outputGraph(graph);
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
+        Assumptions assumptions = new Assumptions(false);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
         new ConditionalEliminationPhase().apply(graph);
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
+        new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
         StructuredGraph referenceGraph = parse(referenceSnippet);
         assertEquals(referenceGraph, graph);
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/StampCanonicalizerTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/StampCanonicalizerTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -24,6 +24,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.type.*;
 import com.oracle.graal.phases.common.*;
@@ -97,7 +98,7 @@
 
     private void testZeroReturn(String methodName) {
         StructuredGraph graph = parse(methodName);
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
+        new CanonicalizerPhase(null, runtime(), new Assumptions(false)).apply(graph);
         new DeadCodeEliminationPhase().apply(graph);
         assertConstantReturn(graph, 0);
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/StraighteningTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/StraighteningTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -24,6 +24,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.common.*;
@@ -88,7 +89,7 @@
         // No debug scope to reduce console noise for @Test(expected = ...) tests
         StructuredGraph graph = parse(snippet);
         Debug.dump(graph, "Graph");
-        new CanonicalizerPhase(null, runtime(), null).apply(graph);
+        new CanonicalizerPhase(null, runtime(), new Assumptions(false)).apply(graph);
         StructuredGraph referenceGraph = parse(REFERENCE_SNIPPET);
         assertEquals(referenceGraph, graph);
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/TypeSystemTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/TypeSystemTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -28,6 +28,7 @@
 
 import org.junit.Test;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.graph.Node.*;
@@ -189,12 +190,13 @@
         if (false) {
             StructuredGraph graph = parse(snippet);
             Debug.dump(graph, "Graph");
-            new CanonicalizerPhase(null, runtime(), null).apply(graph);
+            Assumptions assumptions = new Assumptions(false);
+            new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
             new ConditionalEliminationPhase().apply(graph);
-            new CanonicalizerPhase(null, runtime(), null).apply(graph);
+            new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
             new GlobalValueNumberingPhase().apply(graph);
             StructuredGraph referenceGraph = parse(referenceSnippet);
-            new CanonicalizerPhase(null, runtime(), null).apply(referenceGraph);
+            new CanonicalizerPhase(null, runtime(), assumptions).apply(referenceGraph);
             new GlobalValueNumberingPhase().apply(referenceGraph);
             assertEquals(referenceGraph, graph);
         }
@@ -253,9 +255,10 @@
         if (false) {
             StructuredGraph graph = parse(snippet);
             Debug.dump(graph, "Graph");
-            new CanonicalizerPhase(null, runtime(), null).apply(graph);
+            Assumptions assumptions = new Assumptions(false);
+            new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
             new ConditionalEliminationPhase().apply(graph);
-            new CanonicalizerPhase(null, runtime(), null).apply(graph);
+            new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
             Debug.dump(graph, "Graph");
             Assert.assertFalse("shouldn't have nodes of type " + clazz, graph.getNodes(clazz).iterator().hasNext());
         }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/EscapeAnalysisTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/EscapeAnalysisTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -26,6 +26,7 @@
 
 import org.junit.Test;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.compiler.test.*;
 import com.oracle.graal.nodes.*;
@@ -194,9 +195,10 @@
                 n.node().setProbability(100000);
             }
 
-            new InliningPhase(null, runtime(), null, null, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+            Assumptions assumptions = new Assumptions(false);
+            new InliningPhase(null, runtime(), null, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
             new DeadCodeEliminationPhase().apply(graph);
-            new PartialEscapeAnalysisPhase(null, runtime(), null, iterativeEscapeAnalysis).apply(graph);
+            new PartialEscapeAnalysisPhase(null, runtime(), assumptions, iterativeEscapeAnalysis).apply(graph);
             Assert.assertEquals(1, graph.getNodes(ReturnNode.class).count());
             ReturnNode returnNode = graph.getNodes(ReturnNode.class).first();
             if (expectedConstantResult != null) {
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/PartialEscapeAnalysisTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/PartialEscapeAnalysisTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -27,6 +27,7 @@
 
 import org.junit.Test;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.compiler.test.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
@@ -149,13 +150,15 @@
             for (Invoke n : graph.getInvokes()) {
                 n.node().setProbability(100000);
             }
-            new InliningPhase(null, runtime(), null, null, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+            Assumptions assumptions = new Assumptions(false);
+            new InliningPhase(null, runtime(), null, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
             new DeadCodeEliminationPhase().apply(graph);
-            new CanonicalizerPhase(null, runtime(), null).apply(graph);
-            new PartialEscapeAnalysisPhase(null, runtime(), null, false).apply(graph);
+            new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
+            new PartialEscapeAnalysisPhase(null, runtime(), assumptions, false).apply(graph);
+
             new CullFrameStatesPhase().apply(graph);
             new DeadCodeEliminationPhase().apply(graph);
-            new CanonicalizerPhase(null, runtime(), null).apply(graph);
+            new CanonicalizerPhase(null, runtime(), assumptions).apply(graph);
             return graph;
         } catch (AssertionFailedError t) {
             throw new RuntimeException(t.getMessage() + "\n" + getCanonicalGraphString(graph), t);
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java	Tue Nov 27 12:12:02 2012 +0100
@@ -75,7 +75,7 @@
         return Debug.scope("GraalCompiler", new Object[]{graph, method, this}, new Callable<CompilationResult>() {
 
             public CompilationResult call() {
-                final Assumptions assumptions = GraalOptions.OptAssumptions ? new Assumptions() : null;
+                final Assumptions assumptions = new Assumptions(GraalOptions.OptAssumptions);
                 final LIR lir = Debug.scope("FrontEnd", new Callable<LIR>() {
 
                     public LIR call() {
@@ -118,16 +118,9 @@
             new CanonicalizerPhase(target, runtime, assumptions).apply(graph);
         }
 
-        if (GraalOptions.Intrinsify) {
-            new IntrinsificationPhase(runtime).apply(graph);
-        }
-
         if (GraalOptions.Inline && !plan.isPhaseDisabled(InliningPhase.class)) {
             new InliningPhase(target, runtime, null, assumptions, cache, plan, optimisticOpts).apply(graph);
-
-            if (GraalOptions.OptCanonicalizer) {
-                new CanonicalizerPhase(target, runtime, assumptions).apply(graph);
-            }
+            new DeadCodeEliminationPhase().apply(graph);
 
             if (GraalOptions.CheckCastElimination && GraalOptions.OptCanonicalizer) {
                 new IterativeConditionalEliminationPhase(target, runtime, assumptions).apply(graph);
@@ -139,7 +132,7 @@
         plan.runPhases(PhasePosition.HIGH_LEVEL, graph);
 
         if (GraalOptions.FullUnroll) {
-            new LoopFullUnrollPhase(runtime).apply(graph);
+            new LoopFullUnrollPhase(runtime, assumptions).apply(graph);
             if (GraalOptions.OptCanonicalizer) {
                 new CanonicalizerPhase(target, runtime, assumptions).apply(graph);
             }
@@ -265,7 +258,7 @@
         TargetMethodAssembler tasm = backend.newAssembler(frameMap, lir);
         backend.emitCode(tasm, method, lir);
         CompilationResult targetMethod = tasm.finishTargetMethod(method, false);
-        if (assumptions != null && !assumptions.isEmpty()) {
+        if (!assumptions.isEmpty()) {
             targetMethod.setAssumptions(assumptions);
         }
 
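With the constructor change in GraalCompiler, an Assumptions object now always exists and GraalOptions.OptAssumptions only controls whether it may record optimistically; downstream code therefore tests emptiness rather than null. A condensed sketch of the two ends of that contract, using only names from this diff:

    final Assumptions assumptions = new Assumptions(GraalOptions.OptAssumptions);
    // ... front end and back end run, possibly recording assumptions ...
    if (!assumptions.isEmpty()) {   // null check no longer needed
        targetMethod.setAssumptions(assumptions);
    }
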
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Tue Nov 27 12:12:02 2012 +0100
@@ -841,17 +841,16 @@
         } else {
             Variable value = load(operand(x.value()));
             LabelRef defaultTarget = x.defaultSuccessor() == null ? null : getLIRBlock(x.defaultSuccessor());
-            if (value.getKind() == Kind.Object || keyCount < GraalOptions.SequentialSwitchLimit) {
-                // only a few entries
+            if (value.getKind() != Kind.Int) {
+                // hopefully only a few entries
                 emitSequentialSwitch(x, value, defaultTarget);
             } else {
                 assert value.getKind() == Kind.Int;
                 long valueRange = x.keyAt(keyCount - 1).asLong() - x.keyAt(0).asLong() + 1;
                 int switchRangeCount = switchRangeCount(x);
-                int rangeDensity = keyCount / switchRangeCount;
-                if (rangeDensity >= GraalOptions.RangeTestsSwitchDensity) {
-                    emitSwitchRanges(x, switchRangeCount, value, defaultTarget);
-                } else if (keyCount / (double) valueRange >= GraalOptions.MinTableSwitchDensity) {
+                if (switchRangeCount == 0) {
+                    emitJump(getLIRBlock(x.defaultSuccessor()), null);
+                } else if (switchRangeCount >= GraalOptions.MinimumJumpTableSize && keyCount / (double) valueRange >= GraalOptions.MinTableSwitchDensity) {
                     int minValue = x.keyAt(0).asInt();
                     assert valueRange < Integer.MAX_VALUE;
                     LabelRef[] targets = new LabelRef[(int) valueRange];
@@ -862,6 +861,8 @@
                         targets[x.keyAt(i).asInt() - minValue] = getLIRBlock(x.keySuccessor(i));
                     }
                     emitTableSwitch(minValue, defaultTarget, targets, value);
+                } else if (keyCount / switchRangeCount >= GraalOptions.RangeTestsSwitchDensity) {
+                    emitSwitchRanges(x, switchRangeCount, value, defaultTarget);
                 } else {
                     emitSequentialSwitch(x, value, defaultTarget);
                 }
@@ -892,27 +893,26 @@
 
     private static int switchRangeCount(SwitchNode x) {
         int keyCount = x.keyCount();
-        int i = 0;
-        while (i < keyCount && x.keySuccessorIndex(i) == x.defaultSuccessorIndex()) {
-            i++;
+        int switchRangeCount = 0;
+        int defaultSux = x.defaultSuccessorIndex();
+
+        int key = x.keyAt(0).asInt();
+        int sux = x.keySuccessorIndex(0);
+        for (int i = 0; i < keyCount; i++) {
+            int newKey = x.keyAt(i).asInt();
+            int newSux = x.keySuccessorIndex(i);
+            if (newSux != defaultSux && (newKey != key + 1 || sux != newSux)) {
+                switchRangeCount++;
+            }
+            key = newKey;
+            sux = newSux;
         }
-        if (i == keyCount) {
-            return 0;
-        } else {
-            int switchRangeCount = 1;
-            i++;
-            for (; i < keyCount; i++) {
-                if (x.keySuccessorIndex(i) != x.defaultSuccessorIndex()) {
-                    if (x.keyAt(i).asInt() != x.keyAt(i - 1).asInt() + 1 || x.keySuccessorIndex(i) != x.keySuccessorIndex(i - 1)) {
-                        switchRangeCount++;
-                    }
-                }
-            }
-            return switchRangeCount;
-        }
+        return switchRangeCount;
     }
 
     private void emitSwitchRanges(SwitchNode x, int switchRangeCount, Variable keyValue, LabelRef defaultTarget) {
+        assert switchRangeCount >= 1 : "switch ranges should not be used for emitting only the default case";
+
         int[] lowKeys = new int[switchRangeCount];
         int[] highKeys = new int[switchRangeCount];
         LabelRef[] targets = new LabelRef[switchRangeCount];
@@ -920,40 +920,28 @@
         int keyCount = x.keyCount();
         int defaultSuccessor = x.defaultSuccessorIndex();
 
-        int current = 0;
-        int i = 0;
-        while (i < keyCount && x.keySuccessorIndex(i) == x.defaultSuccessorIndex()) {
-            i++;
+        int current = -1;
+        int key = -1;
+        int successor = -1;
+        for (int i = 0; i < keyCount; i++) {
+            int newSuccessor = x.keySuccessorIndex(i);
+            int newKey = x.keyAt(i).asInt();
+            if (newSuccessor != defaultSuccessor) {
+                if (key + 1 == newKey && successor == newSuccessor) {
+                    // still in same range
+                    highKeys[current] = newKey;
+                } else {
+                    current++;
+                    lowKeys[current] = newKey;
+                    highKeys[current] = newKey;
+                    targets[current] = getLIRBlock(x.blockSuccessor(newSuccessor));
+                }
+            }
+            key = newKey;
+            successor = newSuccessor;
         }
-        if (i == keyCount) {
-            emitJump(defaultTarget, null);
-        } else {
-            int key = x.keyAt(i).asInt();
-            int successor = x.keySuccessorIndex(i);
-            lowKeys[current] = key;
-            highKeys[current] = key;
-            targets[current] = getLIRBlock(x.blockSuccessor(successor));
-            i++;
-            for (; i < keyCount; i++) {
-                int newSuccessor = x.keySuccessorIndex(i);
-                if (newSuccessor != defaultSuccessor) {
-                    int newKey = x.keyAt(i).asInt();
-                    if (key + 1 == newKey && successor == newSuccessor) {
-                        // still in same range
-                        highKeys[current] = newKey;
-                    } else {
-                        current++;
-                        lowKeys[current] = newKey;
-                        highKeys[current] = newKey;
-                        targets[current] = getLIRBlock(x.blockSuccessor(newSuccessor));
-                    }
-                    key = newKey;
-                }
-                successor = newSuccessor;
-            }
-            assert current == switchRangeCount - 1;
-            emitSwitchRanges(lowKeys, highKeys, targets, defaultTarget, keyValue);
-        }
+        assert current == switchRangeCount - 1;
+        emitSwitchRanges(lowKeys, highKeys, targets, defaultTarget, keyValue);
     }
 
     public FrameMap frameMap() {
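The rewritten switchRangeCount and emitSwitchRanges form a two-pass scheme over the sorted switch keys: the first pass counts maximal runs of consecutive int keys that share the same non-default successor, and the second pass fills lowKeys/highKeys/targets with exactly that many entries (hence the assert current == switchRangeCount - 1). The zero-range case, where every key branches to the default successor, is now handled up front with a plain jump. A worked example of the counting, under the code's own assumption that keys are sorted:

    // keys:        1   2   3   7   8   10
    // successors:  A   A   A  def  B    B     (def = default successor)
    //
    // i=0..2: keys 1,2,3 are consecutive and all go to A  -> one range [1..3] -> A
    // i=3:    key 7 goes to the default successor         -> skipped, no range
    // i=4:    key 8 follows 7 numerically, but the previous
    //         entry's successor was the default one       -> new range [8..8] -> B
    // i=5:    key 10 is not consecutive with 8            -> new range [10..10] -> B
    //
    // switchRangeCount == 3; emitSwitchRanges then produces
    //   lowKeys  = {1, 8, 10}, highKeys = {3, 8, 10}, targets = {A, B, B}.
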
--- a/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/DebugScope.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/DebugScope.java	Tue Nov 27 12:12:02 2012 +0100
@@ -106,7 +106,7 @@
 
     public void log(String msg, Object... args) {
         if (isLogEnabled()) {
-            if (lastLogScope.get() != this) {
+            if (lastLogScope.get() == null || !lastLogScope.get().qualifiedName.equals(this.qualifiedName)) {
                 output.println("scope: " + qualifiedName);
                 lastLogScope.set(this);
             }
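The DebugScope fix replaces a reference-identity test with a comparison of qualified names plus a null guard, so that logging from a different DebugScope instance with the same qualified name no longer reprints the header. A small trace of the difference, assuming two instances s1 and s2 whose qualifiedName is equal:

    // Old test: lastLogScope.get() != this
    //   s1.log(...): prints "scope: X", caches s1
    //   s2.log(...): s2 != s1        -> header printed again, although still scope X
    // New test: qualifiedName equality with a null guard for the first log call
    //   s2.log(...): names are equal -> header suppressed
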
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Tue Nov 27 12:12:02 2012 +0100
@@ -125,6 +125,7 @@
             Address address;
             Value index = operand(x.offset());
             if (ValueUtil.isConstant(index) && NumUtil.isInt(ValueUtil.asConstant(index).asLong() + disp)) {
+                assert !runtime.needsDataPatch(asConstant(index));
                 disp += (int) ValueUtil.asConstant(index).asLong();
                 address = new Address(kind, load(operand(x.object())), disp);
             } else {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompilerImpl.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompilerImpl.java	Tue Nov 27 12:12:02 2012 +0100
@@ -31,6 +31,7 @@
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.*;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.compiler.*;
 import com.oracle.graal.debug.*;
@@ -38,7 +39,6 @@
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.hotspot.phases.*;
-import com.oracle.graal.hotspot.snippets.*;
 import com.oracle.graal.java.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.*;
@@ -51,7 +51,6 @@
 public class VMToCompilerImpl implements VMToCompiler {
 
     private final HotSpotGraalRuntime graalRuntime;
-    private IntrinsifyArrayCopyPhase intrinsifyArrayCopy;
 
     public final HotSpotTypePrimitive typeBoolean;
     public final HotSpotTypePrimitive typeChar;
@@ -135,10 +134,10 @@
 
                 @Override
                 public void run() {
-                    VMToCompilerImpl.this.intrinsifyArrayCopy = new IntrinsifyArrayCopyPhase(runtime);
-                    SnippetInstaller installer = new SnippetInstaller(runtime, runtime.getGraalRuntime().getTarget());
+                    Assumptions assumptions = new Assumptions(GraalOptions.OptAssumptions);
+                    SnippetInstaller installer = new SnippetInstaller(runtime, runtime.getGraalRuntime().getTarget(), HotSpotGraalRuntime.wordStamp(), assumptions);
                     GraalIntrinsics.installIntrinsics(installer);
-                    runtime.installSnippets(installer);
+                    runtime.installSnippets(installer, assumptions);
                 }
             });
 
@@ -571,9 +570,6 @@
         if (onStackReplacement) {
             phasePlan.addPhase(PhasePosition.AFTER_PARSING, new OnStackReplacementPhase());
         }
-        if (GraalOptions.Intrinsify) {
-            phasePlan.addPhase(PhasePosition.HIGH_LEVEL, intrinsifyArrayCopy);
-        }
         return phasePlan;
     }
 
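In VMToCompilerImpl, snippet installation no longer goes through the dedicated IntrinsifyArrayCopyPhase field, and the SnippetInstaller receives the word stamp and an Assumptions container up front. A sketch of the new bootstrap sequence, restating the hunk above under the assumption that GraalOptions.OptAssumptions governs snippet-time recording the same way it does regular compilations:

    Assumptions assumptions = new Assumptions(GraalOptions.OptAssumptions);
    SnippetInstaller installer = new SnippetInstaller(runtime,
                    runtime.getGraalRuntime().getTarget(),
                    HotSpotGraalRuntime.wordStamp(),   // new parameter: stamp of word values
                    assumptions);                      // passed to snippet graph building
    GraalIntrinsics.installIntrinsics(installer);
    runtime.installSnippets(installer, assumptions);
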
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Tue Nov 27 12:12:02 2012 +0100
@@ -1,749 +1,751 @@
-/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.meta;
-
-import static com.oracle.graal.api.code.DeoptimizationAction.*;
-import static com.oracle.graal.api.code.MemoryBarriers.*;
-import static com.oracle.graal.api.meta.DeoptimizationReason.*;
-import static com.oracle.graal.api.meta.Value.*;
-import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
-import static com.oracle.graal.hotspot.snippets.SystemSnippets.*;
-import static com.oracle.graal.java.GraphBuilderPhase.*;
-import static com.oracle.graal.nodes.StructuredGraph.*;
-import static com.oracle.graal.nodes.UnwindNode.*;
-import static com.oracle.graal.nodes.java.RegisterFinalizerNode.*;
-import static com.oracle.graal.snippets.Log.*;
-import static com.oracle.graal.snippets.MathSnippetsX86.*;
-
-import java.lang.reflect.*;
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.code.CodeUtil.RefMapFormatter;
-import com.oracle.graal.api.code.CompilationResult.Call;
-import com.oracle.graal.api.code.CompilationResult.DataPatch;
-import com.oracle.graal.api.code.CompilationResult.Mark;
-import com.oracle.graal.api.code.CompilationResult.Safepoint;
-import com.oracle.graal.api.code.Register.RegisterFlag;
-import com.oracle.graal.api.code.RuntimeCall.Descriptor;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.hotspot.*;
-import com.oracle.graal.hotspot.bridge.*;
-import com.oracle.graal.hotspot.nodes.*;
-import com.oracle.graal.hotspot.phases.*;
-import com.oracle.graal.hotspot.snippets.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.calc.*;
-import com.oracle.graal.nodes.extended.*;
-import com.oracle.graal.nodes.java.*;
-import com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.graal.nodes.type.*;
-import com.oracle.graal.phases.*;
-import com.oracle.graal.printer.*;
-import com.oracle.graal.snippets.*;
-
-/**
- * HotSpot implementation of {@link GraalCodeCacheProvider}.
- */
-public abstract class HotSpotRuntime implements GraalCodeCacheProvider {
-    public final HotSpotVMConfig config;
-
-    protected final RegisterConfig regConfig;
-    protected final RegisterConfig globalStubRegConfig;
-    protected final HotSpotGraalRuntime graalRuntime;
-
-    private CheckCastSnippets.Templates checkcastSnippets;
-    private InstanceOfSnippets.Templates instanceofSnippets;
-    private NewObjectSnippets.Templates newObjectSnippets;
-    private MonitorSnippets.Templates monitorSnippets;
-
-    private final Map<Descriptor, RuntimeCall> runtimeCalls = new HashMap<>();
-
-    protected Value ret(Kind kind) {
-        if (kind.isVoid()) {
-            return ILLEGAL;
-        }
-        return globalStubRegConfig.getReturnRegister(kind).asValue(kind);
-    }
-
-    protected Value arg(int index, Kind kind) {
-        if (kind.isFloat() || kind.isDouble()) {
-            return globalStubRegConfig.getCallingConventionRegisters(CallingConvention.Type.RuntimeCall, RegisterFlag.FPU)[index].asValue(kind);
-        }
-        return globalStubRegConfig.getCallingConventionRegisters(CallingConvention.Type.RuntimeCall, RegisterFlag.CPU)[index].asValue(kind);
-    }
-
-    protected Value scratch(Kind kind) {
-        return globalStubRegConfig.getScratchRegister().asValue(kind);
-    }
-
-    public HotSpotRuntime(HotSpotVMConfig config, HotSpotGraalRuntime graalRuntime) {
-        this.config = config;
-        this.graalRuntime = graalRuntime;
-        regConfig = createRegisterConfig(false);
-        globalStubRegConfig = createRegisterConfig(true);
-
-        addRuntimeCall(UNWIND_EXCEPTION, config.unwindExceptionStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Void),
-                        /* arg0: exception */ arg(0, Kind.Object));
-
-        addRuntimeCall(OnStackReplacementPhase.OSR_MIGRATION_END, config.osrMigrationEndStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Void),
-                        /* arg0:      long */ arg(0, Kind.Long));
-
-        addRuntimeCall(REGISTER_FINALIZER, config.registerFinalizerStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Void),
-                        /* arg0:    object */ arg(0, Kind.Object));
-
-        addRuntimeCall(CREATE_NULL_POINTER_EXCEPTION, config.createNullPointerExceptionStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Object));
-
-        addRuntimeCall(CREATE_OUT_OF_BOUNDS_EXCEPTION, config.createOutOfBoundsExceptionStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Object),
-                        /* arg0:     index */ arg(0, Kind.Int));
-
-        addRuntimeCall(JAVA_TIME_MILLIS, config.javaTimeMillisStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Long));
-
-        addRuntimeCall(JAVA_TIME_NANOS, config.javaTimeNanosStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Long));
-
-        addRuntimeCall(ARITHMETIC_SIN, config.arithmeticSinStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Double),
-                        /* arg0:     index */ arg(0, Kind.Double));
-
-        addRuntimeCall(ARITHMETIC_COS, config.arithmeticCosStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Double),
-                        /* arg0:     index */ arg(0, Kind.Double));
-
-        addRuntimeCall(ARITHMETIC_TAN, config.arithmeticTanStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Double),
-                        /* arg0:     index */ arg(0, Kind.Double));
-
-        addRuntimeCall(LOG_PRIMITIVE, config.logPrimitiveStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Void),
-                        /* arg0:  typeChar */ arg(0, Kind.Int),
-                        /* arg1:     value */ arg(1, Kind.Long),
-                        /* arg2:   newline */ arg(2, Kind.Boolean));
-
-        addRuntimeCall(LOG_PRINTF, config.logPrintfStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Void),
-                        /* arg0:    format */ arg(0, Kind.Object),
-                        /* arg1:     value */ arg(1, Kind.Long));
-
-        addRuntimeCall(LOG_OBJECT, config.logObjectStub,
-                        /*           temps */ null,
-                        /*             ret */ ret(Kind.Void),
-                        /* arg0:    object */ arg(0, Kind.Object),
-                        /* arg1:     flags */ arg(1, Kind.Int));
-    }
-
-
-    /**
-     * Registers the details for linking a runtime call.
-     *
-     * @param descriptor name and signature of the call
-     * @param address target address of the call
-     * @param tempRegs temporary registers used (and killed) by the call (null if none)
-     * @param ret where the call returns its result
-     * @param args where arguments are passed to the call
-     */
-    protected void addRuntimeCall(Descriptor descriptor, long address, Register[] tempRegs, Value ret, Value... args) {
-        Value[] temps = tempRegs == null || tempRegs.length == 0 ? Value.NONE : new Value[tempRegs.length];
-        for (int i = 0; i < temps.length; i++) {
-            temps[i] = tempRegs[i].asValue();
-        }
-        Kind retKind = ret.getKind();
-        if (retKind == Kind.Illegal) {
-            retKind = Kind.Void;
-        }
-        assert retKind.equals(descriptor.getResultKind()) : descriptor + " incompatible with result location " + ret;
-        Kind[] argKinds = descriptor.getArgumentKinds();
-        assert argKinds.length == args.length : descriptor + " incompatible with number of argument locations: " + args.length;
-        for (int i = 0; i < argKinds.length; i++) {
-            assert argKinds[i].equals(args[i].getKind()) : descriptor + " incompatible with argument location " + i + ": " + args[i];
-        }
-        HotSpotRuntimeCall runtimeCall = new HotSpotRuntimeCall(descriptor, address, new CallingConvention(temps, 0, ret, args), graalRuntime.getCompilerToVM());
-        runtimeCalls.put(descriptor, runtimeCall);
-    }
-
-    protected abstract RegisterConfig createRegisterConfig(boolean globalStubConfig);
-
-    public void installSnippets(SnippetInstaller installer) {
-        installer.install(SystemSnippets.class);
-        installer.install(UnsafeSnippets.class);
-        installer.install(ArrayCopySnippets.class);
-
-        installer.install(CheckCastSnippets.class);
-        installer.install(InstanceOfSnippets.class);
-        installer.install(NewObjectSnippets.class);
-        installer.install(MonitorSnippets.class);
-
-        checkcastSnippets = new CheckCastSnippets.Templates(this, graalRuntime.getTarget());
-        instanceofSnippets = new InstanceOfSnippets.Templates(this, graalRuntime.getTarget());
-        newObjectSnippets = new NewObjectSnippets.Templates(this, graalRuntime.getTarget(), config.useTLAB);
-        monitorSnippets = new MonitorSnippets.Templates(this, graalRuntime.getTarget(), config.useFastLocking);
-    }
-
-
-    public HotSpotGraalRuntime getGraalRuntime() {
-        return graalRuntime;
-    }
-
-    /**
-     * Gets the register holding the current thread.
-     */
-    public abstract Register threadRegister();
-
-    /**
-     * Gets the stack pointer register.
-     */
-    public abstract Register stackPointerRegister();
-
-    @Override
-    public String disassemble(CodeInfo info, CompilationResult tm) {
-        byte[] code = info.getCode();
-        TargetDescription target = graalRuntime.getTarget();
-        HexCodeFile hcf = new HexCodeFile(code, info.getStart(), target.arch.getName(), target.wordSize * 8);
-        if (tm != null) {
-            HexCodeFile.addAnnotations(hcf, tm.getAnnotations());
-            addExceptionHandlersComment(tm, hcf);
-            Register fp = regConfig.getFrameRegister();
-            RefMapFormatter slotFormatter = new RefMapFormatter(target.arch, target.wordSize, fp, 0);
-            for (Safepoint safepoint : tm.getSafepoints()) {
-                if (safepoint instanceof Call) {
-                    Call call = (Call) safepoint;
-                    if (call.debugInfo != null) {
-                        hcf.addComment(call.pcOffset + call.size, CodeUtil.append(new StringBuilder(100), call.debugInfo, slotFormatter).toString());
-                    }
-                    addOperandComment(hcf, call.pcOffset, "{" + getTargetName(call) + "}");
-                } else {
-                    if (safepoint.debugInfo != null) {
-                        hcf.addComment(safepoint.pcOffset, CodeUtil.append(new StringBuilder(100), safepoint.debugInfo, slotFormatter).toString());
-                    }
-                    addOperandComment(hcf, safepoint.pcOffset, "{safepoint}");
-                }
-            }
-            for (DataPatch site : tm.getDataReferences()) {
-                hcf.addOperandComment(site.pcOffset, "{" + site.constant + "}");
-            }
-            for (Mark mark : tm.getMarks()) {
-                hcf.addComment(mark.pcOffset, getMarkName(mark));
-            }
-        }
-        return hcf.toEmbeddedString();
-    }
-
-    /**
-     * Decodes a call target to a mnemonic if possible.
-     */
-    private String getTargetName(Call call) {
-        Field[] fields = config.getClass().getDeclaredFields();
-        for (Field f : fields) {
-            if (f.getName().endsWith("Stub")) {
-                f.setAccessible(true);
-                try {
-                    Object address = f.get(config);
-                    if (address.equals(call.target)) {
-                        return f.getName() + ":0x" + Long.toHexString((Long) address);
-                    }
-                } catch (Exception e) {
-                }
-            }
-        }
-        return String.valueOf(call.target);
-    }
-
-    /**
-     * Decodes a mark to a mnemonic if possible.
-     */
-    private static String getMarkName(Mark mark) {
-        Field[] fields = Marks.class.getDeclaredFields();
-        for (Field f : fields) {
-            if (Modifier.isStatic(f.getModifiers()) && f.getName().startsWith("MARK_")) {
-                f.setAccessible(true);
-                try {
-                    if (f.get(null).equals(mark.id)) {
-                        return f.getName();
-                    }
-                } catch (Exception e) {
-                }
-            }
-        }
-        return "MARK:" + mark.id;
-    }
-
-    private static void addExceptionHandlersComment(CompilationResult tm, HexCodeFile hcf) {
-        if (!tm.getExceptionHandlers().isEmpty()) {
-            String nl = HexCodeFile.NEW_LINE;
-            StringBuilder buf = new StringBuilder("------ Exception Handlers ------").append(nl);
-            for (CompilationResult.ExceptionHandler e : tm.getExceptionHandlers()) {
-                buf.append("    ").
-                    append(e.pcOffset).append(" -> ").
-                    append(e.handlerPos).
-                    append(nl);
-                hcf.addComment(e.pcOffset, "[exception -> " + e.handlerPos + "]");
-                hcf.addComment(e.handlerPos, "[exception handler for " + e.pcOffset + "]");
-            }
-            hcf.addComment(0, buf.toString());
-        }
-    }
-
-    private static void addOperandComment(HexCodeFile hcf, int pos, String comment) {
-        String oldValue = hcf.addOperandComment(pos, comment);
-        assert oldValue == null : "multiple comments for operand of instruction at " + pos + ": " + comment + ", " + oldValue;
-    }
-
-    @Override
-    public ResolvedJavaType lookupJavaType(Constant constant) {
-        if (!constant.getKind().isObject() || constant.isNull()) {
-            return null;
-        }
-        Object o = constant.asObject();
-        return HotSpotResolvedJavaType.fromClass(o.getClass());
-    }
-
-    @Override
-    public int getSizeOfLockData() {
-        return config.basicLockSize;
-    }
-
-    @Override
-    public boolean constantEquals(Constant x, Constant y) {
-        return x.equals(y);
-    }
-
-    @Override
-    public RegisterConfig lookupRegisterConfig(JavaMethod method) {
-        return regConfig;
-    }
-
-    /**
-     * HotSpot needs an area suitable for storing a program counter for temporary use during the deoptimization process.
-     */
-    @Override
-    public int getCustomStackAreaSize() {
-        return graalRuntime.getTarget().wordSize;
-    }
-
-    @Override
-    public int getMinimumOutgoingSize() {
-        return config.runtimeCallStackSize;
-    }
-
-    @Override
-    public int lookupArrayLength(Constant array) {
-        if (!array.getKind().isObject() || array.isNull() || !array.asObject().getClass().isArray()) {
-            throw new IllegalArgumentException(array + " is not an array");
-        }
-        return Array.getLength(array.asObject());
-    }
-
-    @Override
-    public void lower(Node n, LoweringTool tool) {
-        StructuredGraph graph = (StructuredGraph) n.graph();
-        Kind wordKind = graalRuntime.getTarget().wordKind;
-        if (n instanceof ArrayLengthNode) {
-            ArrayLengthNode arrayLengthNode = (ArrayLengthNode) n;
-            SafeReadNode safeReadArrayLength = safeReadArrayLength(arrayLengthNode.array(), StructuredGraph.INVALID_GRAPH_ID);
-            graph.replaceFixedWithFixed(arrayLengthNode, safeReadArrayLength);
-        } else if (n instanceof Invoke) {
-            Invoke invoke = (Invoke) n;
-            if (invoke.callTarget() instanceof MethodCallTargetNode) {
-                MethodCallTargetNode callTarget = invoke.methodCallTarget();
-                NodeInputList<ValueNode> parameters = callTarget.arguments();
-                ValueNode receiver = parameters.size() <= 0 ? null : parameters.get(0);
-                if (!callTarget.isStatic() && receiver.kind() == Kind.Object && !receiver.objectStamp().nonNull()) {
-                    invoke.node().dependencies().add(tool.createNullCheckGuard(receiver, invoke.leafGraphId()));
-                }
-                Kind[] signature = MetaUtil.signatureToKinds(callTarget.targetMethod().getSignature(), callTarget.isStatic() ? null : callTarget.targetMethod().getDeclaringClass().getKind());
-
-                AbstractCallTargetNode loweredCallTarget = null;
-                if (callTarget.invokeKind() == InvokeKind.Virtual &&
-                    GraalOptions.InlineVTableStubs &&
-                    (GraalOptions.AlwaysInlineVTableStubs || invoke.isPolymorphic())) {
-
-                    HotSpotResolvedJavaMethod hsMethod = (HotSpotResolvedJavaMethod) callTarget.targetMethod();
-                    if (!hsMethod.getDeclaringClass().isInterface()) {
-                        int vtableEntryOffset = hsMethod.vtableEntryOffset();
-                        if (vtableEntryOffset > 0) {
-                            // We use LocationNode.ANY_LOCATION for the reads that access the vtable entry and the compiled code entry
-                            // as HotSpot does not guarantee they are final values.
-                            assert vtableEntryOffset > 0;
-                            LoadHubNode hub = graph.add(new LoadHubNode(receiver, wordKind));
-                            ReadNode metaspaceMethod = graph.add(new ReadNode(hub, LocationNode.create(LocationNode.ANY_LOCATION, wordKind, vtableEntryOffset, graph), StampFactory.forKind(wordKind())));
-                            ReadNode compiledEntry = graph.add(new ReadNode(metaspaceMethod, LocationNode.create(LocationNode.ANY_LOCATION, wordKind, config.methodCompiledEntryOffset, graph), StampFactory.forKind(wordKind())));
-
-                            loweredCallTarget = graph.add(new HotSpotIndirectCallTargetNode(metaspaceMethod, compiledEntry, parameters, invoke.node().stamp(), signature, callTarget.targetMethod(), CallingConvention.Type.JavaCall));
-
-                            graph.addBeforeFixed(invoke.node(), hub);
-                            graph.addAfterFixed(hub, metaspaceMethod);
-                            graph.addAfterFixed(metaspaceMethod, compiledEntry);
-                        }
-                    }
-                }
-
-                if (loweredCallTarget == null) {
-                    loweredCallTarget = graph.add(new HotSpotDirectCallTargetNode(parameters, invoke.node().stamp(), signature, callTarget.targetMethod(), CallingConvention.Type.JavaCall, callTarget.invokeKind()));
-                }
-                callTarget.replaceAndDelete(loweredCallTarget);
-            }
-        } else if (n instanceof LoadFieldNode) {
-            LoadFieldNode field = (LoadFieldNode) n;
-            int displacement = ((HotSpotResolvedJavaField) field.field()).offset();
-            assert field.kind() != Kind.Illegal;
-            ReadNode memoryRead = graph.add(new ReadNode(field.object(), LocationNode.create(field.field(), field.field().getKind(), displacement, graph), field.stamp()));
-            memoryRead.dependencies().add(tool.createNullCheckGuard(field.object(), field.leafGraphId()));
-            graph.replaceFixedWithFixed(field, memoryRead);
-            if (field.isVolatile()) {
-                MembarNode preMembar = graph.add(new MembarNode(JMM_PRE_VOLATILE_READ));
-                graph.addBeforeFixed(memoryRead, preMembar);
-                MembarNode postMembar = graph.add(new MembarNode(JMM_POST_VOLATILE_READ));
-                graph.addAfterFixed(memoryRead, postMembar);
-            }
-        } else if (n instanceof StoreFieldNode) {
-            StoreFieldNode storeField = (StoreFieldNode) n;
-            HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) storeField.field();
-            WriteNode memoryWrite = graph.add(new WriteNode(storeField.object(), storeField.value(), LocationNode.create(field, field.getKind(), field.offset(), graph)));
-            memoryWrite.dependencies().add(tool.createNullCheckGuard(storeField.object(), storeField.leafGraphId()));
-            memoryWrite.setStateAfter(storeField.stateAfter());
-            graph.replaceFixedWithFixed(storeField, memoryWrite);
-            FixedWithNextNode last = memoryWrite;
-            if (field.getKind() == Kind.Object && !memoryWrite.value().objectStamp().alwaysNull()) {
-                FieldWriteBarrier writeBarrier = graph.add(new FieldWriteBarrier(memoryWrite.object()));
-                graph.addAfterFixed(memoryWrite, writeBarrier);
-                last = writeBarrier;
-            }
-            if (storeField.isVolatile()) {
-                MembarNode preMembar = graph.add(new MembarNode(JMM_PRE_VOLATILE_WRITE));
-                graph.addBeforeFixed(memoryWrite, preMembar);
-                MembarNode postMembar = graph.add(new MembarNode(JMM_POST_VOLATILE_WRITE));
-                graph.addAfterFixed(last, postMembar);
-            }
-        } else if (n instanceof CompareAndSwapNode) {
-            // Separate out GC barrier semantics
-            CompareAndSwapNode cas = (CompareAndSwapNode) n;
-            ValueNode expected = cas.expected();
-            if (expected.kind() == Kind.Object && !cas.newValue().objectStamp().alwaysNull()) {
-                ResolvedJavaType type = cas.object().objectStamp().type();
-                if (type != null && !type.isArrayClass() && type.toJava() != Object.class) {
-                    // Use a field write barrier since it's not an array store
-                    FieldWriteBarrier writeBarrier = graph.add(new FieldWriteBarrier(cas.object()));
-                    graph.addAfterFixed(cas, writeBarrier);
-                } else {
-                    // This may be an array store so use an array write barrier
-                    LocationNode location = IndexedLocationNode.create(LocationNode.ANY_LOCATION, cas.expected().kind(), cas.displacement(), cas.offset(), graph, false);
-                    graph.addAfterFixed(cas, graph.add(new ArrayWriteBarrier(cas.object(), location)));
-                }
-            }
-        } else if (n instanceof LoadIndexedNode) {
-            LoadIndexedNode loadIndexed = (LoadIndexedNode) n;
-            ValueNode boundsCheck = createBoundsCheck(loadIndexed, tool);
-            Kind elementKind = loadIndexed.elementKind();
-            LocationNode arrayLocation = createArrayLocation(graph, elementKind, loadIndexed.index());
-            ReadNode memoryRead = graph.add(new ReadNode(loadIndexed.array(), arrayLocation, loadIndexed.stamp()));
-            memoryRead.dependencies().add(boundsCheck);
-            graph.replaceFixedWithFixed(loadIndexed, memoryRead);
-        } else if (n instanceof StoreIndexedNode) {
-            StoreIndexedNode storeIndexed = (StoreIndexedNode) n;
-            ValueNode boundsCheck = createBoundsCheck(storeIndexed, tool);
-
-            Kind elementKind = storeIndexed.elementKind();
-            LocationNode arrayLocation = createArrayLocation(graph, elementKind, storeIndexed.index());
-            ValueNode value = storeIndexed.value();
-            ValueNode array = storeIndexed.array();
-            if (elementKind == Kind.Object && !value.objectStamp().alwaysNull()) {
-                // Store check!
-                ResolvedJavaType arrayType = array.objectStamp().type();
-                if (arrayType != null && array.objectStamp().isExactType()) {
-                    ResolvedJavaType elementType = arrayType.getComponentType();
-                    if (!MetaUtil.isJavaLangObject(elementType)) {
-                        CheckCastNode checkcast = graph.add(new CheckCastNode(elementType, value, null));
-                        graph.addBeforeFixed(storeIndexed, checkcast);
-                        value = checkcast;
-                    }
-                } else {
-                    LoadHubNode arrayClass = graph.add(new LoadHubNode(array, wordKind));
-                    LocationNode location = LocationNode.create(LocationNode.FINAL_LOCATION, wordKind, config.arrayClassElementOffset, graph);
-                    FloatingReadNode arrayElementKlass = graph.unique(new FloatingReadNode(arrayClass, location, null, StampFactory.forKind(wordKind())));
-                    CheckCastDynamicNode checkcast = graph.add(new CheckCastDynamicNode(arrayElementKlass, value));
-                    graph.addBeforeFixed(storeIndexed, checkcast);
-                    graph.addBeforeFixed(checkcast, arrayClass);
-                    value = checkcast;
-                }
-            }
-            WriteNode memoryWrite = graph.add(new WriteNode(array, value, arrayLocation));
-            memoryWrite.dependencies().add(boundsCheck);
-            memoryWrite.setStateAfter(storeIndexed.stateAfter());
-
-            graph.replaceFixedWithFixed(storeIndexed, memoryWrite);
-
-            if (elementKind == Kind.Object && !value.objectStamp().alwaysNull()) {
-                graph.addAfterFixed(memoryWrite, graph.add(new ArrayWriteBarrier(array, arrayLocation)));
-            }
-        } else if (n instanceof UnsafeLoadNode) {
-            UnsafeLoadNode load = (UnsafeLoadNode) n;
-            assert load.kind() != Kind.Illegal;
-            IndexedLocationNode location = IndexedLocationNode.create(LocationNode.ANY_LOCATION, load.loadKind(), load.displacement(), load.offset(), graph, false);
-            ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp()));
-            // An unsafe read must not float outside its block, as it may float above an explicit null check on its object.
-            memoryRead.dependencies().add(BeginNode.prevBegin(load));
-            graph.replaceFixedWithFixed(load, memoryRead);
-        } else if (n instanceof UnsafeStoreNode) {
-            UnsafeStoreNode store = (UnsafeStoreNode) n;
-            IndexedLocationNode location = IndexedLocationNode.create(LocationNode.ANY_LOCATION, store.storeKind(), store.displacement(), store.offset(), graph, false);
-            ValueNode object = store.object();
-            WriteNode write = graph.add(new WriteNode(object, store.value(), location));
-            write.setStateAfter(store.stateAfter());
-            graph.replaceFixedWithFixed(store, write);
-            if (write.value().kind() == Kind.Object && !write.value().objectStamp().alwaysNull()) {
-                ResolvedJavaType type = object.objectStamp().type();
-                WriteBarrier writeBarrier;
-                if (type != null && !type.isArrayClass() && type.toJava() != Object.class) {
-                    // Use a field write barrier since it's not an array store
-                    writeBarrier = graph.add(new FieldWriteBarrier(object));
-                } else {
-                    // This may be an array store so use an array write barrier
-                    writeBarrier = graph.add(new ArrayWriteBarrier(object, location));
-                }
-                graph.addAfterFixed(write, writeBarrier);
-            }
-        } else if (n instanceof LoadHubNode) {
-            LoadHubNode loadHub = (LoadHubNode) n;
-            assert loadHub.kind() == wordKind;
-            LocationNode location = LocationNode.create(LocationNode.FINAL_LOCATION, wordKind, config.hubOffset, graph);
-            ValueNode object = loadHub.object();
-            ValueNode guard = tool.createNullCheckGuard(object, StructuredGraph.INVALID_GRAPH_ID);
-            ReadNode hub = graph.add(new ReadNode(object, location, StampFactory.forKind(wordKind())));
-            hub.dependencies().add(guard);
-            graph.replaceFixed(loadHub, hub);
-        } else if (n instanceof CheckCastNode) {
-            checkcastSnippets.lower((CheckCastNode) n, tool);
-        } else if (n instanceof CheckCastDynamicNode) {
-            checkcastSnippets.lower((CheckCastDynamicNode) n);
-        } else if (n instanceof InstanceOfNode) {
-            instanceofSnippets.lower((InstanceOfNode) n, tool);
-        } else if (n instanceof NewInstanceNode) {
-            newObjectSnippets.lower((NewInstanceNode) n, tool);
-        } else if (n instanceof NewArrayNode) {
-            newObjectSnippets.lower((NewArrayNode) n, tool);
-        } else if (n instanceof MonitorEnterNode) {
-            monitorSnippets.lower((MonitorEnterNode) n, tool);
-        } else if (n instanceof MonitorExitNode) {
-            monitorSnippets.lower((MonitorExitNode) n, tool);
-        } else if (n instanceof TLABAllocateNode) {
-            newObjectSnippets.lower((TLABAllocateNode) n, tool);
-        } else if (n instanceof InitializeObjectNode) {
-            newObjectSnippets.lower((InitializeObjectNode) n, tool);
-        } else if (n instanceof InitializeArrayNode) {
-            newObjectSnippets.lower((InitializeArrayNode) n, tool);
-        } else if (n instanceof NewMultiArrayNode) {
-            newObjectSnippets.lower((NewMultiArrayNode) n, tool);
-        } else {
-            assert false : "Node implementing Lowerable not handled: " + n;
-        }
-    }
-
-    private static IndexedLocationNode createArrayLocation(Graph graph, Kind elementKind, ValueNode index) {
-        return IndexedLocationNode.create(LocationNode.getArrayLocation(elementKind), elementKind, elementKind.getArrayBaseOffset(), index, graph, true);
-    }
-
-    private SafeReadNode safeReadArrayLength(ValueNode array, long leafGraphId) {
-        return safeRead(array.graph(), Kind.Int, array, config.arrayLengthOffset, StampFactory.positiveInt(), leafGraphId);
-    }
-
-    private static ValueNode createBoundsCheck(AccessIndexedNode n, LoweringTool tool) {
-        StructuredGraph graph = (StructuredGraph) n.graph();
-        ArrayLengthNode arrayLength = graph.add(new ArrayLengthNode(n.array()));
-        ValueNode guard = tool.createGuard(graph.unique(new IntegerBelowThanNode(n.index(), arrayLength)), BoundsCheckException, InvalidateReprofile, n.leafGraphId());
-
-        graph.addBeforeFixed(n, arrayLength);
-        return guard;
-    }
-
-    @Override
-    public StructuredGraph intrinsicGraph(ResolvedJavaMethod caller, int bci, ResolvedJavaMethod method, List<? extends Node> parameters) {
-        ResolvedJavaType holder = method.getDeclaringClass();
-        String fullName = method.getName() + ((HotSpotSignature) method.getSignature()).asString();
-        Kind wordKind = graalRuntime.getTarget().wordKind;
-        if (holder.toJava() == Object.class) {
-            if (fullName.equals("getClass()Ljava/lang/Class;")) {
-                ValueNode obj = (ValueNode) parameters.get(0);
-                ObjectStamp stamp = (ObjectStamp) obj.stamp();
-                if (stamp.nonNull() && stamp.isExactType()) {
-                    StructuredGraph graph = new StructuredGraph();
-                    ValueNode result = ConstantNode.forObject(stamp.type().toJava(), this, graph);
-                    ReturnNode ret = graph.add(new ReturnNode(result));
-                    graph.start().setNext(ret);
-                    return graph;
-                }
-                StructuredGraph graph = new StructuredGraph();
-                LocalNode receiver = graph.unique(new LocalNode(0, StampFactory.objectNonNull()));
-                LoadHubNode hub = graph.add(new LoadHubNode(receiver, wordKind));
-                Stamp resultStamp = StampFactory.declaredNonNull(lookupJavaType(Class.class));
-                FloatingReadNode result = graph.unique(new FloatingReadNode(hub, LocationNode.create(LocationNode.FINAL_LOCATION, Kind.Object, config.classMirrorOffset, graph), null, resultStamp));
-                ReturnNode ret = graph.add(new ReturnNode(result));
-                graph.start().setNext(hub);
-                hub.setNext(ret);
-                return graph;
-            }
-        } else if (holder.toJava() == Class.class) {
-            if (fullName.equals("getModifiers()I")) {
-                StructuredGraph graph = new StructuredGraph();
-                LocalNode receiver = graph.unique(new LocalNode(0, StampFactory.objectNonNull()));
-                SafeReadNode klass = safeRead(graph, wordKind, receiver, config.klassOffset, StampFactory.forKind(wordKind), INVALID_GRAPH_ID);
-                graph.start().setNext(klass);
-                LocationNode location = LocationNode.create(LocationNode.FINAL_LOCATION, Kind.Int, config.klassModifierFlagsOffset, graph);
-                FloatingReadNode readModifiers = graph.unique(new FloatingReadNode(klass, location, null, StampFactory.intValue()));
-                CompareNode isZero = CompareNode.createCompareNode(Condition.EQ, klass, ConstantNode.defaultForKind(wordKind, graph));
-                GuardNode guard = graph.unique(new GuardNode(isZero, graph.start(), NullCheckException, InvalidateReprofile, true, INVALID_GRAPH_ID));
-                readModifiers.dependencies().add(guard);
-                ReturnNode ret = graph.add(new ReturnNode(readModifiers));
-                klass.setNext(ret);
-                return graph;
-            }
-        } else if (holder.toJava() == Thread.class) {
-            if (fullName.equals("currentThread()Ljava/lang/Thread;")) {
-                StructuredGraph graph = new StructuredGraph();
-                ReturnNode ret = graph.add(new ReturnNode(graph.unique(new CurrentThread(config.threadObjectOffset, this))));
-                graph.start().setNext(ret);
-                return graph;
-            }
-        }
-        return null;
-    }
-
-    private static SafeReadNode safeRead(Graph graph, Kind kind, ValueNode value, int offset, Stamp stamp, long leafGraphId) {
-        return graph.add(new SafeReadNode(value, LocationNode.create(LocationNode.FINAL_LOCATION, kind, offset, graph), stamp, leafGraphId));
-    }
-
-    public ResolvedJavaType lookupJavaType(Class<?> clazz) {
-        return HotSpotResolvedJavaType.fromClass(clazz);
-    }
-
-    public Object lookupCallTarget(Object target) {
-        if (target instanceof HotSpotRuntimeCall) {
-            return ((HotSpotRuntimeCall) target).address;
-        }
-        return target;
-    }
-
-    public RuntimeCall lookupRuntimeCall(Descriptor descriptor) {
-        assert runtimeCalls.containsKey(descriptor) : descriptor;
-        return runtimeCalls.get(descriptor);
-    }
-
-    public ResolvedJavaMethod lookupJavaMethod(Method reflectionMethod) {
-        CompilerToVM c2vm = graalRuntime.getCompilerToVM();
-        HotSpotResolvedJavaType[] resultHolder = {null};
-        long metaspaceMethod = c2vm.getMetaspaceMethod(reflectionMethod, resultHolder);
-        assert metaspaceMethod != 0L;
-        return resultHolder[0].createMethod(metaspaceMethod);
-    }
-
-    @Override
-    public ResolvedJavaField lookupJavaField(Field reflectionField) {
-        return graalRuntime.getCompilerToVM().getJavaField(reflectionField);
-    }
-
-    private static HotSpotCodeInfo makeInfo(ResolvedJavaMethod method, CompilationResult compResult, CodeInfo[] info) {
-        HotSpotCodeInfo hsInfo = null;
-        if (info != null && info.length > 0) {
-            hsInfo = new HotSpotCodeInfo(compResult, (HotSpotResolvedJavaMethod) method);
-            info[0] = hsInfo;
-        }
-        return hsInfo;
-    }
-
-    public void installMethod(HotSpotResolvedJavaMethod method, int entryBCI, CompilationResult compResult, CodeInfo[] info) {
-        HotSpotCodeInfo hsInfo = makeInfo(method, compResult, info);
-        graalRuntime.getCompilerToVM().installCode(new HotSpotCompilationResult(method, entryBCI, compResult), null, hsInfo);
-    }
-
-    @Override
-    public InstalledCode addMethod(ResolvedJavaMethod method, CompilationResult compResult, CodeInfo[] info) {
-        HotSpotCodeInfo hsInfo = makeInfo(method, compResult, info);
-        HotSpotResolvedJavaMethod hotspotMethod = (HotSpotResolvedJavaMethod) method;
-        return graalRuntime.getCompilerToVM().installCode(new HotSpotCompilationResult(hotspotMethod, -1, compResult), new HotSpotInstalledCode(hotspotMethod), hsInfo);
-    }
-
-    @Override
-    public int encodeDeoptActionAndReason(DeoptimizationAction action, DeoptimizationReason reason) {
-        final int actionShift = 0;
-        final int reasonShift = 3;
-
-        int actionValue = convertDeoptAction(action);
-        int reasonValue = convertDeoptReason(reason);
-        return (~(((reasonValue) << reasonShift) + ((actionValue) << actionShift)));
-    }
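-
-    // Worked example of the encoding above, using values from the convert methods below:
-    // encodeDeoptActionAndReason(InvalidateReprofile, NullCheckException) = ~((1 << 3) + (2 << 0)) = ~10 = -11.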
-
-    public int convertDeoptAction(DeoptimizationAction action) {
-        // This must be kept in sync with the DeoptAction enum defined in deoptimization.hpp
-        switch(action) {
-            case None: return 0;
-            case RecompileIfTooManyDeopts: return 1;
-            case InvalidateReprofile: return 2;
-            case InvalidateRecompile: return 3;
-            case InvalidateStopCompiling: return 4;
-            default: throw GraalInternalError.shouldNotReachHere();
-        }
-    }
-
-    public int convertDeoptReason(DeoptimizationReason reason) {
-        // This must be kept in sync with the DeoptReason enum defined in deoptimization.hpp
-        switch(reason) {
-            case None: return 0;
-            case NullCheckException: return 1;
-            case BoundsCheckException: return 2;
-            case ClassCastException: return 3;
-            case ArrayStoreException: return 4;
-            case UnreachedCode: return 5;
-            case TypeCheckedInliningViolated: return 6;
-            case OptimizedTypeCheckViolated: return 7;
-            case NotCompiledExceptionHandler: return 8;
-            case Unresolved: return 9;
-            case JavaSubroutineMismatch: return 10;
-            case ArithmeticException: return 11;
-            case RuntimeConstraint: return 12;
-            default: throw GraalInternalError.shouldNotReachHere();
-        }
-    }
-
-    public boolean needsDataPatch(Constant constant) {
-        return constant.getPrimitiveAnnotation() instanceof HotSpotResolvedJavaType;
-    }
-}
+/*
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.meta;
+
+import static com.oracle.graal.api.code.DeoptimizationAction.*;
+import static com.oracle.graal.api.code.MemoryBarriers.*;
+import static com.oracle.graal.api.meta.DeoptimizationReason.*;
+import static com.oracle.graal.api.meta.Value.*;
+import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
+import static com.oracle.graal.hotspot.snippets.SystemSnippets.*;
+import static com.oracle.graal.java.GraphBuilderPhase.*;
+import static com.oracle.graal.nodes.StructuredGraph.*;
+import static com.oracle.graal.nodes.UnwindNode.*;
+import static com.oracle.graal.nodes.java.RegisterFinalizerNode.*;
+import static com.oracle.graal.snippets.Log.*;
+import static com.oracle.graal.snippets.MathSnippetsX86.*;
+
+import java.lang.reflect.*;
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.code.CodeUtil.RefMapFormatter;
+import com.oracle.graal.api.code.CompilationResult.Call;
+import com.oracle.graal.api.code.CompilationResult.DataPatch;
+import com.oracle.graal.api.code.CompilationResult.Mark;
+import com.oracle.graal.api.code.CompilationResult.Safepoint;
+import com.oracle.graal.api.code.Register.RegisterFlag;
+import com.oracle.graal.api.code.RuntimeCall.Descriptor;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.hotspot.*;
+import com.oracle.graal.hotspot.bridge.*;
+import com.oracle.graal.hotspot.nodes.*;
+import com.oracle.graal.hotspot.phases.*;
+import com.oracle.graal.hotspot.snippets.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.calc.*;
+import com.oracle.graal.nodes.extended.*;
+import com.oracle.graal.nodes.java.*;
+import com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind;
+import com.oracle.graal.nodes.spi.*;
+import com.oracle.graal.nodes.type.*;
+import com.oracle.graal.phases.*;
+import com.oracle.graal.printer.*;
+import com.oracle.graal.snippets.*;
+
+/**
+ * HotSpot implementation of {@link GraalCodeCacheProvider}.
+ */
+public abstract class HotSpotRuntime implements GraalCodeCacheProvider {
+    public final HotSpotVMConfig config;
+
+    protected final RegisterConfig regConfig;
+    protected final RegisterConfig globalStubRegConfig;
+    protected final HotSpotGraalRuntime graalRuntime;
+
+    private CheckCastSnippets.Templates checkcastSnippets;
+    private InstanceOfSnippets.Templates instanceofSnippets;
+    private NewObjectSnippets.Templates newObjectSnippets;
+    private MonitorSnippets.Templates monitorSnippets;
+
+    private final Map<Descriptor, RuntimeCall> runtimeCalls = new HashMap<>();
+
+    protected Value ret(Kind kind) {
+        if (kind.isVoid()) {
+            return ILLEGAL;
+        }
+        return globalStubRegConfig.getReturnRegister(kind).asValue(kind);
+    }
+
+    protected Value arg(int index, Kind kind) {
+        if (kind.isFloat() || kind.isDouble()) {
+            return globalStubRegConfig.getCallingConventionRegisters(CallingConvention.Type.RuntimeCall, RegisterFlag.FPU)[index].asValue(kind);
+        }
+        return globalStubRegConfig.getCallingConventionRegisters(CallingConvention.Type.RuntimeCall, RegisterFlag.CPU)[index].asValue(kind);
+    }
+
+    protected Value scratch(Kind kind) {
+        return globalStubRegConfig.getScratchRegister().asValue(kind);
+    }
+
+    public HotSpotRuntime(HotSpotVMConfig config, HotSpotGraalRuntime graalRuntime) {
+        this.config = config;
+        this.graalRuntime = graalRuntime;
+        regConfig = createRegisterConfig(false);
+        globalStubRegConfig = createRegisterConfig(true);
+
+        addRuntimeCall(UNWIND_EXCEPTION, config.unwindExceptionStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Void),
+                        /* arg0: exception */ arg(0, Kind.Object));
+
+        addRuntimeCall(OnStackReplacementPhase.OSR_MIGRATION_END, config.osrMigrationEndStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Void),
+                        /* arg0:      long */ arg(0, Kind.Long));
+
+        addRuntimeCall(REGISTER_FINALIZER, config.registerFinalizerStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Void),
+                        /* arg0:    object */ arg(0, Kind.Object));
+
+        addRuntimeCall(CREATE_NULL_POINTER_EXCEPTION, config.createNullPointerExceptionStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Object));
+
+        addRuntimeCall(CREATE_OUT_OF_BOUNDS_EXCEPTION, config.createOutOfBoundsExceptionStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Object),
+                        /* arg0:     index */ arg(0, Kind.Int));
+
+        addRuntimeCall(JAVA_TIME_MILLIS, config.javaTimeMillisStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Long));
+
+        addRuntimeCall(JAVA_TIME_NANOS, config.javaTimeNanosStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Long));
+
+        addRuntimeCall(ARITHMETIC_SIN, config.arithmeticSinStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Double),
+                        /* arg0:     value */ arg(0, Kind.Double));
+
+        addRuntimeCall(ARITHMETIC_COS, config.arithmeticCosStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Double),
+                        /* arg0:     value */ arg(0, Kind.Double));
+
+        addRuntimeCall(ARITHMETIC_TAN, config.arithmeticTanStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Double),
+                        /* arg0:     value */ arg(0, Kind.Double));
+
+        addRuntimeCall(LOG_PRIMITIVE, config.logPrimitiveStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Void),
+                        /* arg0:  typeChar */ arg(0, Kind.Int),
+                        /* arg1:     value */ arg(1, Kind.Long),
+                        /* arg2:   newline */ arg(2, Kind.Boolean));
+
+        addRuntimeCall(LOG_PRINTF, config.logPrintfStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Void),
+                        /* arg0:    format */ arg(0, Kind.Object),
+                        /* arg1:     value */ arg(1, Kind.Long));
+
+        addRuntimeCall(LOG_OBJECT, config.logObjectStub,
+                        /*           temps */ null,
+                        /*             ret */ ret(Kind.Void),
+                        /* arg0:    object */ arg(0, Kind.Object),
+                        /* arg1:     flags */ arg(1, Kind.Int));
+    }
+
+    /**
+     * Registers the details for linking a runtime call.
+     *
+     * @param descriptor name and signature of the call
+     * @param address target address of the call
+     * @param tempRegs temporary registers used (and killed) by the call (null if none)
+     * @param ret where the call returns its result
+     * @param args where arguments are passed to the call
+     */
+    protected void addRuntimeCall(Descriptor descriptor, long address, Register[] tempRegs, Value ret, Value... args) {
+        Value[] temps = tempRegs == null || tempRegs.length == 0 ? Value.NONE : new Value[tempRegs.length];
+        for (int i = 0; i < temps.length; i++) {
+            temps[i] = tempRegs[i].asValue();
+        }
+        Kind retKind = ret.getKind();
+        if (retKind == Kind.Illegal) {
+            retKind = Kind.Void;
+        }
+        assert retKind.equals(descriptor.getResultKind()) : descriptor + " incompatible with result location " + ret;
+        Kind[] argKinds = descriptor.getArgumentKinds();
+        assert argKinds.length == args.length : descriptor + " incompatible with number of argument locations: " + args.length;
+        for (int i = 0; i < argKinds.length; i++) {
+            assert argKinds[i].equals(args[i].getKind()) : descriptor + " incompatible with argument location " + i + ": " + args[i];
+        }
+        HotSpotRuntimeCall runtimeCall = new HotSpotRuntimeCall(descriptor, address, new CallingConvention(temps, 0, ret, args), graalRuntime.getCompilerToVM());
+        runtimeCalls.put(descriptor, runtimeCall);
+    }
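+
+    // Illustrative usage only (MY_STUB and config.myStubAddress are hypothetical,
+    // not part of this change); registrations follow the pattern of the constructor above:
+    //
+    //     addRuntimeCall(MY_STUB, config.myStubAddress,
+    //                     /* temps */ null,
+    //                     /*   ret */ ret(Kind.Void),
+    //                     /*  arg0 */ arg(0, Kind.Object));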
+
+    protected abstract RegisterConfig createRegisterConfig(boolean globalStubConfig);
+
+    public void installSnippets(SnippetInstaller installer, Assumptions assumptions) {
+        installer.install(SystemSnippets.class);
+        installer.install(UnsafeSnippets.class);
+        installer.install(ArrayCopySnippets.class);
+
+        installer.install(CheckCastSnippets.class);
+        installer.install(InstanceOfSnippets.class);
+        installer.install(NewObjectSnippets.class);
+        installer.install(MonitorSnippets.class);
+
+        checkcastSnippets = new CheckCastSnippets.Templates(this, assumptions, graalRuntime.getTarget());
+        instanceofSnippets = new InstanceOfSnippets.Templates(this, assumptions, graalRuntime.getTarget());
+        newObjectSnippets = new NewObjectSnippets.Templates(this, assumptions, graalRuntime.getTarget(), config.useTLAB);
+        monitorSnippets = new MonitorSnippets.Templates(this, assumptions, graalRuntime.getTarget(), config.useFastLocking);
+    }
+
+    public HotSpotGraalRuntime getGraalRuntime() {
+        return graalRuntime;
+    }
+
+    /**
+     * Gets the register holding the current thread.
+     */
+    public abstract Register threadRegister();
+
+    /**
+     * Gets the stack pointer register.
+     */
+    public abstract Register stackPointerRegister();
+
+    @Override
+    public String disassemble(CodeInfo info, CompilationResult tm) {
+        byte[] code = info.getCode();
+        TargetDescription target = graalRuntime.getTarget();
+        HexCodeFile hcf = new HexCodeFile(code, info.getStart(), target.arch.getName(), target.wordSize * 8);
+        if (tm != null) {
+            HexCodeFile.addAnnotations(hcf, tm.getAnnotations());
+            addExceptionHandlersComment(tm, hcf);
+            Register fp = regConfig.getFrameRegister();
+            RefMapFormatter slotFormatter = new RefMapFormatter(target.arch, target.wordSize, fp, 0);
+            for (Safepoint safepoint : tm.getSafepoints()) {
+                if (safepoint instanceof Call) {
+                    Call call = (Call) safepoint;
+                    if (call.debugInfo != null) {
+                        hcf.addComment(call.pcOffset + call.size, CodeUtil.append(new StringBuilder(100), call.debugInfo, slotFormatter).toString());
+                    }
+                    addOperandComment(hcf, call.pcOffset, "{" + getTargetName(call) + "}");
+                } else {
+                    if (safepoint.debugInfo != null) {
+                        hcf.addComment(safepoint.pcOffset, CodeUtil.append(new StringBuilder(100), safepoint.debugInfo, slotFormatter).toString());
+                    }
+                    addOperandComment(hcf, safepoint.pcOffset, "{safepoint}");
+                }
+            }
+            for (DataPatch site : tm.getDataReferences()) {
+                hcf.addOperandComment(site.pcOffset, "{" + site.constant + "}");
+            }
+            for (Mark mark : tm.getMarks()) {
+                hcf.addComment(mark.pcOffset, getMarkName(mark));
+            }
+        }
+        return hcf.toEmbeddedString();
+    }
+
+    /**
+     * Decodes a call target to a mnemonic if possible.
+     */
+    private String getTargetName(Call call) {
+        Field[] fields = config.getClass().getDeclaredFields();
+        for (Field f : fields) {
+            if (f.getName().endsWith("Stub")) {
+                f.setAccessible(true);
+                try {
+                    Object address = f.get(config);
+                    if (address.equals(call.target)) {
+                        return f.getName() + ":0x" + Long.toHexString((Long) address);
+                    }
+                } catch (Exception e) {
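+                    // Reflection failures are ignored; the raw target address is returned below.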
+                }
+            }
+        }
+        return String.valueOf(call.target);
+    }
+
+    /**
+     * Decodes a mark to a mnemonic if possible.
+     */
+    private static String getMarkName(Mark mark) {
+        Field[] fields = Marks.class.getDeclaredFields();
+        for (Field f : fields) {
+            if (Modifier.isStatic(f.getModifiers()) && f.getName().startsWith("MARK_")) {
+                f.setAccessible(true);
+                try {
+                    if (f.get(null).equals(mark.id)) {
+                        return f.getName();
+                    }
+                } catch (Exception e) {
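+                    // Reflection failures are ignored; the numeric mark id is returned below.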
+                }
+            }
+        }
+        return "MARK:" + mark.id;
+    }
+
+    private static void addExceptionHandlersComment(CompilationResult tm, HexCodeFile hcf) {
+        if (!tm.getExceptionHandlers().isEmpty()) {
+            String nl = HexCodeFile.NEW_LINE;
+            StringBuilder buf = new StringBuilder("------ Exception Handlers ------").append(nl);
+            for (CompilationResult.ExceptionHandler e : tm.getExceptionHandlers()) {
+                buf.append("    ").
+                    append(e.pcOffset).append(" -> ").
+                    append(e.handlerPos).
+                    append(nl);
+                hcf.addComment(e.pcOffset, "[exception -> " + e.handlerPos + "]");
+                hcf.addComment(e.handlerPos, "[exception handler for " + e.pcOffset + "]");
+            }
+            hcf.addComment(0, buf.toString());
+        }
+    }
+
+    private static void addOperandComment(HexCodeFile hcf, int pos, String comment) {
+        String oldValue = hcf.addOperandComment(pos, comment);
+        assert oldValue == null : "multiple comments for operand of instruction at " + pos + ": " + comment + ", " + oldValue;
+    }
+
+    @Override
+    public ResolvedJavaType lookupJavaType(Constant constant) {
+        if (!constant.getKind().isObject() || constant.isNull()) {
+            return null;
+        }
+        Object o = constant.asObject();
+        return HotSpotResolvedJavaType.fromClass(o.getClass());
+    }
+
+    @Override
+    public int getSizeOfLockData() {
+        return config.basicLockSize;
+    }
+
+    @Override
+    public boolean constantEquals(Constant x, Constant y) {
+        return x.equals(y);
+    }
+
+    @Override
+    public RegisterConfig lookupRegisterConfig(JavaMethod method) {
+        return regConfig;
+    }
+
+    /**
+     * HotSpot needs an area suitable for storing a program counter for temporary use during the deoptimization process.
+     */
+    @Override
+    public int getCustomStackAreaSize() {
+        return graalRuntime.getTarget().wordSize;
+    }
+
+    @Override
+    public int getMinimumOutgoingSize() {
+        return config.runtimeCallStackSize;
+    }
+
+    @Override
+    public int lookupArrayLength(Constant array) {
+        if (!array.getKind().isObject() || array.isNull() || !array.asObject().getClass().isArray()) {
+            throw new IllegalArgumentException(array + " is not an array");
+        }
+        return Array.getLength(array.asObject());
+    }
+
+    @Override
+    public void lower(Node n, LoweringTool tool) {
+        StructuredGraph graph = (StructuredGraph) n.graph();
+        Kind wordKind = graalRuntime.getTarget().wordKind;
+        if (n instanceof ArrayLengthNode) {
+            ArrayLengthNode arrayLengthNode = (ArrayLengthNode) n;
+            SafeReadNode safeReadArrayLength = safeReadArrayLength(arrayLengthNode.array(), StructuredGraph.INVALID_GRAPH_ID);
+            graph.replaceFixedWithFixed(arrayLengthNode, safeReadArrayLength);
+        } else if (n instanceof Invoke) {
+            Invoke invoke = (Invoke) n;
+            if (invoke.callTarget() instanceof MethodCallTargetNode) {
+                MethodCallTargetNode callTarget = invoke.methodCallTarget();
+                NodeInputList<ValueNode> parameters = callTarget.arguments();
+                ValueNode receiver = parameters.size() <= 0 ? null : parameters.get(0);
+                if (!callTarget.isStatic() && receiver.kind() == Kind.Object && !receiver.objectStamp().nonNull()) {
+                    invoke.node().dependencies().add(tool.createNullCheckGuard(receiver, invoke.leafGraphId()));
+                }
+                Kind[] signature = MetaUtil.signatureToKinds(callTarget.targetMethod().getSignature(), callTarget.isStatic() ? null : callTarget.targetMethod().getDeclaringClass().getKind());
+
+                AbstractCallTargetNode loweredCallTarget = null;
+                if (callTarget.invokeKind() == InvokeKind.Virtual &&
+                    GraalOptions.InlineVTableStubs &&
+                    (GraalOptions.AlwaysInlineVTableStubs || invoke.isPolymorphic())) {
+
+                    HotSpotResolvedJavaMethod hsMethod = (HotSpotResolvedJavaMethod) callTarget.targetMethod();
+                    if (!hsMethod.getDeclaringClass().isInterface()) {
+                        int vtableEntryOffset = hsMethod.vtableEntryOffset();
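+                        // A non-positive offset presumably means the method has no vtable slot
+                        // (it can be bound statically), so no dispatch stub is inlined for it.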
+                        if (vtableEntryOffset > 0) {
+                            // We use LocationNode.ANY_LOCATION for the reads that access the vtable entry and the compiled code entry
+                            // as HotSpot does not guarantee they are final values.
+                            assert vtableEntryOffset > 0;
+                            LoadHubNode hub = graph.add(new LoadHubNode(receiver, wordKind));
+                            ReadNode metaspaceMethod = graph.add(new ReadNode(hub, LocationNode.create(LocationNode.ANY_LOCATION, wordKind, vtableEntryOffset, graph), StampFactory.forKind(wordKind())));
+                            ReadNode compiledEntry = graph.add(new ReadNode(metaspaceMethod, LocationNode.create(LocationNode.ANY_LOCATION, wordKind, config.methodCompiledEntryOffset, graph), StampFactory.forKind(wordKind())));
+
+                            loweredCallTarget = graph.add(new HotSpotIndirectCallTargetNode(metaspaceMethod, compiledEntry, parameters, invoke.node().stamp(), signature, callTarget.targetMethod(), CallingConvention.Type.JavaCall));
+
+                            graph.addBeforeFixed(invoke.node(), hub);
+                            graph.addAfterFixed(hub, metaspaceMethod);
+                            graph.addAfterFixed(metaspaceMethod, compiledEntry);
+                        }
+                    }
+                }
+
+                if (loweredCallTarget == null) {
+                    loweredCallTarget = graph.add(new HotSpotDirectCallTargetNode(parameters, invoke.node().stamp(), signature, callTarget.targetMethod(), CallingConvention.Type.JavaCall, callTarget.invokeKind()));
+                }
+                callTarget.replaceAndDelete(loweredCallTarget);
+            }
+        } else if (n instanceof LoadFieldNode) {
+            LoadFieldNode field = (LoadFieldNode) n;
+            int displacement = ((HotSpotResolvedJavaField) field.field()).offset();
+            assert field.kind() != Kind.Illegal;
+            ReadNode memoryRead = graph.add(new ReadNode(field.object(), LocationNode.create(field.field(), field.field().getKind(), displacement, graph), field.stamp()));
+            memoryRead.dependencies().add(tool.createNullCheckGuard(field.object(), field.leafGraphId()));
+            graph.replaceFixedWithFixed(field, memoryRead);
+            if (field.isVolatile()) {
+                MembarNode preMembar = graph.add(new MembarNode(JMM_PRE_VOLATILE_READ));
+                graph.addBeforeFixed(memoryRead, preMembar);
+                MembarNode postMembar = graph.add(new MembarNode(JMM_POST_VOLATILE_READ));
+                graph.addAfterFixed(memoryRead, postMembar);
+            }
+        } else if (n instanceof StoreFieldNode) {
+            StoreFieldNode storeField = (StoreFieldNode) n;
+            HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) storeField.field();
+            WriteNode memoryWrite = graph.add(new WriteNode(storeField.object(), storeField.value(), LocationNode.create(field, field.getKind(), field.offset(), graph)));
+            memoryWrite.dependencies().add(tool.createNullCheckGuard(storeField.object(), storeField.leafGraphId()));
+            memoryWrite.setStateAfter(storeField.stateAfter());
+            graph.replaceFixedWithFixed(storeField, memoryWrite);
+            FixedWithNextNode last = memoryWrite;
+            if (field.getKind() == Kind.Object && !memoryWrite.value().objectStamp().alwaysNull()) {
+                FieldWriteBarrier writeBarrier = graph.add(new FieldWriteBarrier(memoryWrite.object()));
+                graph.addAfterFixed(memoryWrite, writeBarrier);
+                last = writeBarrier;
+            }
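+            // Volatile stores are bracketed by membars to enforce JMM ordering; the trailing
+            // membar is placed after the write barrier when one was emitted above.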
+            if (storeField.isVolatile()) {
+                MembarNode preMembar = graph.add(new MembarNode(JMM_PRE_VOLATILE_WRITE));
+                graph.addBeforeFixed(memoryWrite, preMembar);
+                MembarNode postMembar = graph.add(new MembarNode(JMM_POST_VOLATILE_WRITE));
+                graph.addAfterFixed(last, postMembar);
+            }
+        } else if (n instanceof CompareAndSwapNode) {
+            // Separate out GC barrier semantics
+            CompareAndSwapNode cas = (CompareAndSwapNode) n;
+            ValueNode expected = cas.expected();
+            if (expected.kind() == Kind.Object && !cas.newValue().objectStamp().alwaysNull()) {
+                ResolvedJavaType type = cas.object().objectStamp().type();
+                if (type != null && !type.isArrayClass() && type.toJava() != Object.class) {
+                    // Use a field write barrier since it's not an array store
+                    FieldWriteBarrier writeBarrier = graph.add(new FieldWriteBarrier(cas.object()));
+                    graph.addAfterFixed(cas, writeBarrier);
+                } else {
+                    // This may be an array store so use an array write barrier
+                    LocationNode location = IndexedLocationNode.create(LocationNode.ANY_LOCATION, cas.expected().kind(), cas.displacement(), cas.offset(), graph, false);
+                    graph.addAfterFixed(cas, graph.add(new ArrayWriteBarrier(cas.object(), location)));
+                }
+            }
+        } else if (n instanceof LoadIndexedNode) {
+            LoadIndexedNode loadIndexed = (LoadIndexedNode) n;
+            ValueNode boundsCheck = createBoundsCheck(loadIndexed, tool);
+            Kind elementKind = loadIndexed.elementKind();
+            LocationNode arrayLocation = createArrayLocation(graph, elementKind, loadIndexed.index());
+            ReadNode memoryRead = graph.add(new ReadNode(loadIndexed.array(), arrayLocation, loadIndexed.stamp()));
+            memoryRead.dependencies().add(boundsCheck);
+            graph.replaceFixedWithFixed(loadIndexed, memoryRead);
+        } else if (n instanceof StoreIndexedNode) {
+            StoreIndexedNode storeIndexed = (StoreIndexedNode) n;
+            ValueNode boundsCheck = createBoundsCheck(storeIndexed, tool);
+
+            Kind elementKind = storeIndexed.elementKind();
+            LocationNode arrayLocation = createArrayLocation(graph, elementKind, storeIndexed.index());
+            ValueNode value = storeIndexed.value();
+            ValueNode array = storeIndexed.array();
+            if (elementKind == Kind.Object && !value.objectStamp().alwaysNull()) {
+                // Store check: the stored value must be assignable to the array's component type.
+                ResolvedJavaType arrayType = array.objectStamp().type();
+                if (arrayType != null && array.objectStamp().isExactType()) {
+                    ResolvedJavaType elementType = arrayType.getComponentType();
+                    if (!MetaUtil.isJavaLangObject(elementType)) {
+                        CheckCastNode checkcast = graph.add(new CheckCastNode(elementType, value, null));
+                        graph.addBeforeFixed(storeIndexed, checkcast);
+                        value = checkcast;
+                    }
+                } else {
+                    LoadHubNode arrayClass = graph.add(new LoadHubNode(array, wordKind));
+                    LocationNode location = LocationNode.create(LocationNode.FINAL_LOCATION, wordKind, config.arrayClassElementOffset, graph);
+                    FloatingReadNode arrayElementKlass = graph.unique(new FloatingReadNode(arrayClass, location, null, StampFactory.forKind(wordKind())));
+                    CheckCastDynamicNode checkcast = graph.add(new CheckCastDynamicNode(arrayElementKlass, value));
+                    graph.addBeforeFixed(storeIndexed, checkcast);
+                    graph.addBeforeFixed(checkcast, arrayClass);
+                    value = checkcast;
+                }
+            }
+            WriteNode memoryWrite = graph.add(new WriteNode(array, value, arrayLocation));
+            memoryWrite.dependencies().add(boundsCheck);
+            memoryWrite.setStateAfter(storeIndexed.stateAfter());
+
+            graph.replaceFixedWithFixed(storeIndexed, memoryWrite);
+
+            if (elementKind == Kind.Object && !value.objectStamp().alwaysNull()) {
+                graph.addAfterFixed(memoryWrite, graph.add(new ArrayWriteBarrier(array, arrayLocation)));
+            }
+        } else if (n instanceof UnsafeLoadNode) {
+            UnsafeLoadNode load = (UnsafeLoadNode) n;
+            assert load.kind() != Kind.Illegal;
+            IndexedLocationNode location = IndexedLocationNode.create(LocationNode.ANY_LOCATION, load.loadKind(), load.displacement(), load.offset(), graph, false);
+            ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp()));
+            // An unsafe read must not float outside its block, as it may float above an explicit null check on its object.
+            memoryRead.dependencies().add(BeginNode.prevBegin(load));
+            graph.replaceFixedWithFixed(load, memoryRead);
+        } else if (n instanceof UnsafeStoreNode) {
+            UnsafeStoreNode store = (UnsafeStoreNode) n;
+            IndexedLocationNode location = IndexedLocationNode.create(LocationNode.ANY_LOCATION, store.storeKind(), store.displacement(), store.offset(), graph, false);
+            ValueNode object = store.object();
+            WriteNode write = graph.add(new WriteNode(object, store.value(), location));
+            write.setStateAfter(store.stateAfter());
+            graph.replaceFixedWithFixed(store, write);
+            if (write.value().kind() == Kind.Object && !write.value().objectStamp().alwaysNull()) {
+                ResolvedJavaType type = object.objectStamp().type();
+                WriteBarrier writeBarrier;
+                if (type != null && !type.isArrayClass() && type.toJava() != Object.class) {
+                    // Use a field write barrier since it's not an array store
+                    writeBarrier = graph.add(new FieldWriteBarrier(object));
+                } else {
+                    // This may be an array store so use an array write barrier
+                    writeBarrier = graph.add(new ArrayWriteBarrier(object, location));
+                }
+                graph.addAfterFixed(write, writeBarrier);
+            }
+        } else if (n instanceof LoadHubNode) {
+            LoadHubNode loadHub = (LoadHubNode) n;
+            assert loadHub.kind() == wordKind;
+            LocationNode location = LocationNode.create(LocationNode.FINAL_LOCATION, wordKind, config.hubOffset, graph);
+            ValueNode object = loadHub.object();
+            assert !object.isConstant();
+            ValueNode guard = tool.createNullCheckGuard(object, StructuredGraph.INVALID_GRAPH_ID);
+            ReadNode hub = graph.add(new ReadNode(object, location, StampFactory.forKind(wordKind())));
+            hub.dependencies().add(guard);
+            graph.replaceFixed(loadHub, hub);
+        } else if (n instanceof CheckCastNode) {
+            checkcastSnippets.lower((CheckCastNode) n, tool);
+        } else if (n instanceof CheckCastDynamicNode) {
+            checkcastSnippets.lower((CheckCastDynamicNode) n);
+        } else if (n instanceof InstanceOfNode) {
+            instanceofSnippets.lower((InstanceOfNode) n, tool);
+        } else if (n instanceof NewInstanceNode) {
+            newObjectSnippets.lower((NewInstanceNode) n, tool);
+        } else if (n instanceof NewArrayNode) {
+            newObjectSnippets.lower((NewArrayNode) n, tool);
+        } else if (n instanceof MonitorEnterNode) {
+            monitorSnippets.lower((MonitorEnterNode) n, tool);
+        } else if (n instanceof MonitorExitNode) {
+            monitorSnippets.lower((MonitorExitNode) n, tool);
+        } else if (n instanceof TLABAllocateNode) {
+            newObjectSnippets.lower((TLABAllocateNode) n, tool);
+        } else if (n instanceof InitializeObjectNode) {
+            newObjectSnippets.lower((InitializeObjectNode) n, tool);
+        } else if (n instanceof InitializeArrayNode) {
+            newObjectSnippets.lower((InitializeArrayNode) n, tool);
+        } else if (n instanceof NewMultiArrayNode) {
+            newObjectSnippets.lower((NewMultiArrayNode) n, tool);
+        } else {
+            assert false : "Node implementing Lowerable not handled: " + n;
+            throw GraalInternalError.shouldNotReachHere();
+        }
+    }
+
+    private static IndexedLocationNode createArrayLocation(Graph graph, Kind elementKind, ValueNode index) {
+        return IndexedLocationNode.create(LocationNode.getArrayLocation(elementKind), elementKind, elementKind.getArrayBaseOffset(), index, graph, true);
+    }
+
+    private SafeReadNode safeReadArrayLength(ValueNode array, long leafGraphId) {
+        return safeRead(array.graph(), Kind.Int, array, config.arrayLengthOffset, StampFactory.positiveInt(), leafGraphId);
+    }
+
+    private static ValueNode createBoundsCheck(AccessIndexedNode n, LoweringTool tool) {
+        StructuredGraph graph = (StructuredGraph) n.graph();
+        ArrayLengthNode arrayLength = graph.add(new ArrayLengthNode(n.array()));
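+        // IntegerBelowThanNode is an unsigned comparison, so a single index |<| length test
+        // also rejects negative indexes (they wrap around to large unsigned values).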
+        ValueNode guard = tool.createGuard(graph.unique(new IntegerBelowThanNode(n.index(), arrayLength)), BoundsCheckException, InvalidateReprofile, n.leafGraphId());
+
+        graph.addBeforeFixed(n, arrayLength);
+        return guard;
+    }
+
+    @Override
+    public StructuredGraph intrinsicGraph(ResolvedJavaMethod caller, int bci, ResolvedJavaMethod method, List<? extends Node> parameters) {
+        ResolvedJavaType holder = method.getDeclaringClass();
+        String fullName = method.getName() + ((HotSpotSignature) method.getSignature()).asString();
+        Kind wordKind = graalRuntime.getTarget().wordKind;
+        if (holder.toJava() == Object.class) {
+            if (fullName.equals("getClass()Ljava/lang/Class;")) {
+                ValueNode obj = (ValueNode) parameters.get(0);
+                ObjectStamp stamp = (ObjectStamp) obj.stamp();
+                if (stamp.nonNull() && stamp.isExactType()) {
+                    StructuredGraph graph = new StructuredGraph();
+                    ValueNode result = ConstantNode.forObject(stamp.type().toJava(), this, graph);
+                    ReturnNode ret = graph.add(new ReturnNode(result));
+                    graph.start().setNext(ret);
+                    return graph;
+                }
+                StructuredGraph graph = new StructuredGraph();
+                LocalNode receiver = graph.unique(new LocalNode(0, StampFactory.objectNonNull()));
+                LoadHubNode hub = graph.add(new LoadHubNode(receiver, wordKind));
+                Stamp resultStamp = StampFactory.declaredNonNull(lookupJavaType(Class.class));
+                FloatingReadNode result = graph.unique(new FloatingReadNode(hub, LocationNode.create(LocationNode.FINAL_LOCATION, Kind.Object, config.classMirrorOffset, graph), null, resultStamp));
+                ReturnNode ret = graph.add(new ReturnNode(result));
+                graph.start().setNext(hub);
+                hub.setNext(ret);
+                return graph;
+            }
+        } else if (holder.toJava() == Class.class) {
+            if (fullName.equals("getModifiers()I")) {
+                StructuredGraph graph = new StructuredGraph();
+                LocalNode receiver = graph.unique(new LocalNode(0, StampFactory.objectNonNull()));
+                SafeReadNode klass = safeRead(graph, wordKind, receiver, config.klassOffset, StampFactory.forKind(wordKind), INVALID_GRAPH_ID);
+                graph.start().setNext(klass);
+                LocationNode location = LocationNode.create(LocationNode.FINAL_LOCATION, Kind.Int, config.klassModifierFlagsOffset, graph);
+                FloatingReadNode readModifiers = graph.unique(new FloatingReadNode(klass, location, null, StampFactory.intValue()));
+                CompareNode isZero = CompareNode.createCompareNode(Condition.EQ, klass, ConstantNode.defaultForKind(wordKind, graph));
+                GuardNode guard = graph.unique(new GuardNode(isZero, graph.start(), NullCheckException, InvalidateReprofile, true, INVALID_GRAPH_ID));
+                readModifiers.dependencies().add(guard);
+                ReturnNode ret = graph.add(new ReturnNode(readModifiers));
+                klass.setNext(ret);
+                return graph;
+            }
+        } else if (holder.toJava() == Thread.class) {
+            if (fullName.equals("currentThread()Ljava/lang/Thread;")) {
+                StructuredGraph graph = new StructuredGraph();
+                ReturnNode ret = graph.add(new ReturnNode(graph.unique(new CurrentThread(config.threadObjectOffset, this))));
+                graph.start().setNext(ret);
+                return graph;
+            }
+        }
+        return null;
+    }
+
+    private static SafeReadNode safeRead(Graph graph, Kind kind, ValueNode value, int offset, Stamp stamp, long leafGraphId) {
+        return graph.add(new SafeReadNode(value, LocationNode.create(LocationNode.FINAL_LOCATION, kind, offset, graph), stamp, leafGraphId));
+    }
+
+    public ResolvedJavaType lookupJavaType(Class<?> clazz) {
+        return HotSpotResolvedJavaType.fromClass(clazz);
+    }
+
+    public Object lookupCallTarget(Object target) {
+        if (target instanceof HotSpotRuntimeCall) {
+            return ((HotSpotRuntimeCall) target).address;
+        }
+        return target;
+    }
+
+    public RuntimeCall lookupRuntimeCall(Descriptor descriptor) {
+        assert runtimeCalls.containsKey(descriptor) : descriptor;
+        return runtimeCalls.get(descriptor);
+    }
+
+    public ResolvedJavaMethod lookupJavaMethod(Method reflectionMethod) {
+        CompilerToVM c2vm = graalRuntime.getCompilerToVM();
+        HotSpotResolvedJavaType[] resultHolder = {null};
+        long metaspaceMethod = c2vm.getMetaspaceMethod(reflectionMethod, resultHolder);
+        assert metaspaceMethod != 0L;
+        return resultHolder[0].createMethod(metaspaceMethod);
+    }
+
+    @Override
+    public ResolvedJavaField lookupJavaField(Field reflectionField) {
+        return graalRuntime.getCompilerToVM().getJavaField(reflectionField);
+    }
+
+    private static HotSpotCodeInfo makeInfo(ResolvedJavaMethod method, CompilationResult compResult, CodeInfo[] info) {
+        HotSpotCodeInfo hsInfo = null;
+        if (info != null && info.length > 0) {
+            hsInfo = new HotSpotCodeInfo(compResult, (HotSpotResolvedJavaMethod) method);
+            info[0] = hsInfo;
+        }
+        return hsInfo;
+    }
+
+    public void installMethod(HotSpotResolvedJavaMethod method, int entryBCI, CompilationResult compResult, CodeInfo[] info) {
+        HotSpotCodeInfo hsInfo = makeInfo(method, compResult, info);
+        graalRuntime.getCompilerToVM().installCode(new HotSpotCompilationResult(method, entryBCI, compResult), null, hsInfo);
+    }
+
+    @Override
+    public InstalledCode addMethod(ResolvedJavaMethod method, CompilationResult compResult, CodeInfo[] info) {
+        HotSpotCodeInfo hsInfo = makeInfo(method, compResult, info);
+        HotSpotResolvedJavaMethod hotspotMethod = (HotSpotResolvedJavaMethod) method;
+        return graalRuntime.getCompilerToVM().installCode(new HotSpotCompilationResult(hotspotMethod, -1, compResult), new HotSpotInstalledCode(hotspotMethod), hsInfo);
+    }
+
+    @Override
+    public int encodeDeoptActionAndReason(DeoptimizationAction action, DeoptimizationReason reason) {
+        final int actionShift = 0;
+        final int reasonShift = 3;
+
+        int actionValue = convertDeoptAction(action);
+        int reasonValue = convertDeoptReason(reason);
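+        // Pack reason (bits 3 and up) and action (bits 0-2), then bit-invert; the inverted, negative form keeps encoded trap requests distinguishable from plain BCIs.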
+        return (~(((reasonValue) << reasonShift) + ((actionValue) << actionShift)));
+    }
+
+    public int convertDeoptAction(DeoptimizationAction action) {
+        // This must be kept in sync with the DeoptAction enum defined in deoptimization.hpp
+        switch(action) {
+            case None: return 0;
+            case RecompileIfTooManyDeopts: return 1;
+            case InvalidateReprofile: return 2;
+            case InvalidateRecompile: return 3;
+            case InvalidateStopCompiling: return 4;
+            default: throw GraalInternalError.shouldNotReachHere();
+        }
+    }
+
+    public int convertDeoptReason(DeoptimizationReason reason) {
+        // This must be kept in sync with the DeoptReason enum defined in deoptimization.hpp
+        switch(reason) {
+            case None: return 0;
+            case NullCheckException: return 1;
+            case BoundsCheckException: return 2;
+            case ClassCastException: return 3;
+            case ArrayStoreException: return 4;
+            case UnreachedCode: return 5;
+            case TypeCheckedInliningViolated: return 6;
+            case OptimizedTypeCheckViolated: return 7;
+            case NotCompiledExceptionHandler: return 8;
+            case Unresolved: return 9;
+            case JavaSubroutineMismatch: return 10;
+            case ArithmeticException: return 11;
+            case RuntimeConstraint: return 12;
+            default: throw GraalInternalError.shouldNotReachHere();
+        }
+    }
+
+    public boolean needsDataPatch(Constant constant) {
+        return constant.getPrimitiveAnnotation() instanceof HotSpotResolvedJavaType;
+    }
+}
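
For reference, the packing performed by encodeDeoptActionAndReason can be checked in isolation. The following is a standalone sketch, not part of the change; the class and method names are invented for the example, and only the shift values and the bit-inversion are taken from the method above.

    public class DeoptEncodingExample {
        static final int ACTION_SHIFT = 0;
        static final int REASON_SHIFT = 3;

        // Mirrors encodeDeoptActionAndReason: pack the two values, then bit-invert.
        static int encode(int actionValue, int reasonValue) {
            return ~((reasonValue << REASON_SHIFT) + (actionValue << ACTION_SHIFT));
        }

        public static void main(String[] args) {
            // Using the tables above: BoundsCheckException = 2, InvalidateReprofile = 2.
            // ~((2 << 3) + 2) = ~18 = -19
            System.out.println(encode(2, 2)); // prints -19
        }
    }
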
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/CheckCastSnippets.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/CheckCastSnippets.java	Tue Nov 27 12:12:02 2012 +0100
@@ -236,8 +236,8 @@
         private final ResolvedJavaMethod secondary;
         private final ResolvedJavaMethod dynamic;
 
-        public Templates(CodeCacheProvider runtime, TargetDescription target) {
-            super(runtime, target, CheckCastSnippets.class);
+        public Templates(CodeCacheProvider runtime, Assumptions assumptions, TargetDescription target) {
+            super(runtime, assumptions, target, CheckCastSnippets.class);
             exact = snippet("checkcastExact", Object.class, Word.class, boolean.class);
             primary = snippet("checkcastPrimary", Word.class, Object.class, boolean.class, int.class);
             secondary = snippet("checkcastSecondary", Word.class, Object.class, Word[].class, boolean.class);
@@ -272,7 +272,7 @@
                 arguments = arguments("hub", hub).add("object", object).add("hints", hints);
             }
 
-            SnippetTemplate template = cache.get(key);
+            SnippetTemplate template = cache.get(key, assumptions);
             Debug.log("Lowering checkcast in %s: node=%s, template=%s, arguments=%s", graph, checkcast, template, arguments);
             template.instantiate(runtime, checkcast, DEFAULT_REPLACER, arguments);
         }
@@ -289,7 +289,7 @@
             Key key = new Key(dynamic).add("checkNull", checkNull);
             Arguments arguments = arguments("hub", hub).add("object", object);
 
-            SnippetTemplate template = cache.get(key);
+            SnippetTemplate template = cache.get(key, assumptions);
             Debug.log("Lowering dynamic checkcast in %s: node=%s, template=%s, arguments=%s", graph, checkcast, template, arguments);
             template.instantiate(runtime, checkcast, DEFAULT_REPLACER, arguments);
         }
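
A note on the recurring pattern in these Templates changes: the template cache lookup now takes the compilation's Assumptions along with the Key, so that specializing a snippet can record what it optimistically assumed. The cache internals are not part of this diff; the following is a minimal sketch of that shape with hypothetical names, assuming templates are still cached per Key and the assumptions are consulted only on first construction.

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical sketch of a template cache whose get() matches the
    // cache.get(key, assumptions) calls above.
    abstract class TemplateCacheSketch<K, A, T> {
        private final Map<K, T> templates = new HashMap<>();

        // Builds a template, recording any optimistic assumptions it relies on.
        protected abstract T build(K key, A assumptions);

        public synchronized T get(K key, A assumptions) {
            T template = templates.get(key);
            if (template == null) {
                template = build(key, assumptions);
                templates.put(key, template);
            }
            return template;
        }
    }
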
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/InstanceOfSnippets.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/InstanceOfSnippets.java	Tue Nov 27 12:12:02 2012 +0100
@@ -165,8 +165,8 @@
         private final ResolvedJavaMethod instanceofPrimary;
         private final ResolvedJavaMethod instanceofSecondary;
 
-        public Templates(CodeCacheProvider runtime, TargetDescription target) {
-            super(runtime, target, InstanceOfSnippets.class);
+        public Templates(CodeCacheProvider runtime, Assumptions assumptions, TargetDescription target) {
+            super(runtime, assumptions, target, InstanceOfSnippets.class);
             instanceofExact = snippet("instanceofExact", Object.class, Word.class, Object.class, Object.class, boolean.class);
             instanceofPrimary = snippet("instanceofPrimary", Word.class, Object.class, Object.class, Object.class, boolean.class, int.class);
             instanceofSecondary = snippet("instanceofSecondary", Word.class, Object.class, Object.class, Object.class, Word[].class, boolean.class);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/IntrinsifyArrayCopyPhase.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/IntrinsifyArrayCopyPhase.java	Tue Nov 27 12:12:02 2012 +0100
@@ -36,6 +36,7 @@
 
 public class IntrinsifyArrayCopyPhase extends Phase {
     private final GraalCodeCacheProvider runtime;
+    private final Assumptions assumptions;
     private ResolvedJavaMethod arrayCopy;
     private ResolvedJavaMethod byteArrayCopy;
     private ResolvedJavaMethod shortArrayCopy;
@@ -46,8 +47,9 @@
     private ResolvedJavaMethod doubleArrayCopy;
     private ResolvedJavaMethod objectArrayCopy;
 
-    public IntrinsifyArrayCopyPhase(GraalCodeCacheProvider runtime) {
+    public IntrinsifyArrayCopyPhase(GraalCodeCacheProvider runtime, Assumptions assumptions) {
         this.runtime = runtime;
+        this.assumptions = assumptions;
         try {
             byteArrayCopy = getArrayCopySnippet(runtime, byte.class);
             charArrayCopy = getArrayCopySnippet(runtime, char.class);
@@ -121,7 +123,7 @@
             }
         }
         if (GraalOptions.OptCanonicalizer && hits) {
-            new CanonicalizerPhase(null, runtime, null).apply(graph);
+            new CanonicalizerPhase(null, runtime, assumptions).apply(graph);
         }
     }
 }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/MonitorSnippets.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/MonitorSnippets.java	Tue Nov 27 12:12:02 2012 +0100
@@ -399,8 +399,8 @@
         private final ResolvedJavaMethod checkCounter;
         private final boolean useFastLocking;
 
-        public Templates(CodeCacheProvider runtime, TargetDescription target, boolean useFastLocking) {
-            super(runtime, target, MonitorSnippets.class);
+        public Templates(CodeCacheProvider runtime, Assumptions assumptions, TargetDescription target, boolean useFastLocking) {
+            super(runtime, assumptions, target, MonitorSnippets.class);
             monitorenter = snippet("monitorenter", Object.class, boolean.class, boolean.class);
             monitorexit = snippet("monitorexit", Object.class, boolean.class);
             monitorenterStub = snippet("monitorenterStub", Object.class, boolean.class, boolean.class);
@@ -435,7 +435,7 @@
             if (!eliminated) {
                 arguments.add("object", monitorenterNode.object());
             }
-            SnippetTemplate template = cache.get(key);
+            SnippetTemplate template = cache.get(key, assumptions);
             Map<Node, Node> nodes = template.instantiate(runtime, monitorenterNode, DEFAULT_REPLACER, arguments);
             for (Node n : nodes.values()) {
                 if (n instanceof BeginLockScopeNode) {
@@ -460,7 +460,7 @@
             if (!eliminated) {
                 arguments.add("object", monitorexitNode.object());
             }
-            SnippetTemplate template = cache.get(key);
+            SnippetTemplate template = cache.get(key, assumptions);
             Map<Node, Node> nodes = template.instantiate(runtime, monitorexitNode, DEFAULT_REPLACER, arguments);
             for (Node n : nodes.values()) {
                 if (n instanceof EndLockScopeNode) {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/NewObjectSnippets.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/NewObjectSnippets.java	Tue Nov 27 12:12:02 2012 +0100
@@ -1,404 +1,404 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.snippets;
-
-import static com.oracle.graal.api.code.UnsignedMath.*;
-import static com.oracle.graal.hotspot.snippets.HotSpotSnippetUtils.*;
-import static com.oracle.graal.nodes.extended.UnsafeArrayCastNode.*;
-import static com.oracle.graal.nodes.extended.UnsafeCastNode.*;
-import static com.oracle.graal.snippets.Snippet.Varargs.*;
-import static com.oracle.graal.snippets.SnippetTemplate.*;
-import static com.oracle.graal.snippets.SnippetTemplate.Arguments.*;
-import static com.oracle.graal.snippets.nodes.DirectObjectStoreNode.*;
-import static com.oracle.graal.snippets.nodes.ExplodeLoopNode.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.hotspot.meta.*;
-import com.oracle.graal.hotspot.nodes.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.java.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.graal.nodes.type.*;
-import com.oracle.graal.phases.*;
-import com.oracle.graal.snippets.*;
-import com.oracle.graal.snippets.Snippet.ConstantParameter;
-import com.oracle.graal.snippets.Snippet.Parameter;
-import com.oracle.graal.snippets.Snippet.VarargsParameter;
-import com.oracle.graal.snippets.SnippetTemplate.AbstractTemplates;
-import com.oracle.graal.snippets.SnippetTemplate.Arguments;
-import com.oracle.graal.snippets.SnippetTemplate.Key;
-import com.oracle.graal.snippets.nodes.*;
-
-/**
- * Snippets used for implementing NEW, ANEWARRAY and NEWARRAY.
- */
-public class NewObjectSnippets implements SnippetsInterface {
-
-    @Snippet
-    public static Word allocate(@Parameter("size") int size) {
-        Word thread = thread();
-        Word top = loadWordFromWord(thread, threadTlabTopOffset());
-        Word end = loadWordFromWord(thread, threadTlabEndOffset());
-        Word available = end.minus(top);
-        if (available.aboveOrEqual(Word.fromInt(size))) {
-            Word newTop = top.plus(size);
-            storeObject(thread, 0, threadTlabTopOffset(), newTop);
-            return top;
-        }
-        return Word.zero();
-    }
-
-    @Snippet
-    public static Object initializeObject(
-                    @Parameter("memory") Word memory,
-                    @Parameter("hub") Word hub,
-                    @Parameter("prototypeMarkWord") Word prototypeMarkWord,
-                    @ConstantParameter("size") int size,
-                    @ConstantParameter("fillContents") boolean fillContents,
-                    @ConstantParameter("locked") boolean locked) {
-
-        Object result;
-        if (memory == Word.zero()) {
-            new_stub.inc();
-            result = NewInstanceStubCall.call(hub);
-        } else {
-            if (locked) {
-                formatObject(hub, size, memory, thread().or(biasedLockPattern()), fillContents);
-            } else {
-                formatObject(hub, size, memory, prototypeMarkWord, fillContents);
-            }
-            result = memory.toObject();
-        }
-        return unsafeCast(verifyOop(result), StampFactory.forNodeIntrinsic());
-    }
-
-    @Snippet
-    public static Object initializeObjectArray(
-                    @Parameter("memory") Word memory,
-                    @Parameter("hub") Word hub,
-                    @Parameter("length") int length,
-                    @Parameter("size") int size,
-                    @Parameter("prototypeMarkWord") Word prototypeMarkWord,
-                    @ConstantParameter("headerSize") int headerSize,
-                    @ConstantParameter("fillContents") boolean fillContents,
-                    @ConstantParameter("locked") boolean locked) {
-        if (locked) {
-            return initializeArray(memory, hub, length, size, thread().or(biasedLockPattern()), headerSize, true, fillContents);
-        } else {
-            return initializeArray(memory, hub, length, size, prototypeMarkWord, headerSize, true, fillContents);
-        }
-    }
-
-    @Snippet
-    public static Object initializePrimitiveArray(
-                    @Parameter("memory") Word memory,
-                    @Parameter("hub") Word hub,
-                    @Parameter("length") int length,
-                    @Parameter("size") int size,
-                    @Parameter("prototypeMarkWord") Word prototypeMarkWord,
-                    @ConstantParameter("headerSize") int headerSize,
-                    @ConstantParameter("fillContents") boolean fillContents,
-                    @ConstantParameter("locked") boolean locked) {
-        if (locked) {
-            return initializeArray(memory, hub, length, size, thread().or(biasedLockPattern()), headerSize, false, fillContents);
-        } else {
-            return initializeArray(memory, hub, length, size, prototypeMarkWord, headerSize, false, fillContents);
-        }
-    }
-
-    private static Object initializeArray(Word memory, Word hub, int length, int size, Word prototypeMarkWord, int headerSize, boolean isObjectArray, boolean fillContents) {
-        Object result;
-        if (memory == Word.zero()) {
-            if (isObjectArray) {
-                anewarray_stub.inc();
-            } else {
-                newarray_stub.inc();
-            }
-            result = NewArrayStubCall.call(isObjectArray, hub, length);
-        } else {
-            if (isObjectArray) {
-                anewarray_loopInit.inc();
-            } else {
-                newarray_loopInit.inc();
-            }
-            formatArray(hub, size, length, headerSize, memory, prototypeMarkWord, fillContents);
-            result = memory.toObject();
-        }
-        return unsafeArrayCast(verifyOop(result), length, StampFactory.forNodeIntrinsic());
-    }
-
-    /**
-     * Maximum array length for which fast path allocation is used.
-     */
-    private static final int MAX_ARRAY_FAST_PATH_ALLOCATION_LENGTH = 0x00FFFFFF;
-
-    @Snippet
-    public static Object allocateArrayAndInitialize(
-                    @Parameter("length") int length,
-                    @ConstantParameter("alignment") int alignment,
-                    @ConstantParameter("headerSize") int headerSize,
-                    @ConstantParameter("log2ElementSize") int log2ElementSize,
-                    @ConstantParameter("type") ResolvedJavaType type) {
-        if (!belowThan(length, MAX_ARRAY_FAST_PATH_ALLOCATION_LENGTH)) {
-            // This handles both negative array sizes and very large array sizes
-            DeoptimizeNode.deopt(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.RuntimeConstraint);
-        }
-        int size = getArraySize(length, alignment, headerSize, log2ElementSize);
-        Word memory = TLABAllocateNode.allocateVariableSize(size);
-        return InitializeArrayNode.initialize(memory, length, size, type, true, false);
-    }
-
-    public static int getArraySize(int length, int alignment, int headerSize, int log2ElementSize) {
-        int size = (length << log2ElementSize) + headerSize + (alignment - 1);
-        int mask = ~(alignment - 1);
-        return size & mask;
-    }
-
-    /**
-     * Calls the runtime stub for implementing MULTIANEWARRAY.
-     */
-    @Snippet
-    public static Object newmultiarray(
-                    @Parameter("hub") Word hub,
-                    @ConstantParameter("rank") int rank,
-                    @VarargsParameter("dimensions") int[] dimensions) {
-        Word dims = DimensionsNode.allocaDimsArray(rank);
-        ExplodeLoopNode.explodeLoop();
-        for (int i = 0; i < rank; i++) {
-            DirectObjectStoreNode.storeInt(dims, 0, i * 4, dimensions[i]);
-        }
-        return NewMultiArrayStubCall.call(hub, rank, dims);
-    }
-
-    /**
-     * Maximum size of an object whose body is initialized by a sequence of
-     * zero-stores to its fields. Larger objects have their bodies initialized
-     * in a loop.
-     */
-    private static final int MAX_UNROLLED_OBJECT_ZEROING_SIZE = 10 * wordSize();
-
-    /**
-     * Formats some allocated memory with an object header and zeroes out the rest.
-     */
-    private static void formatObject(Word hub, int size, Word memory, Word compileTimePrototypeMarkWord, boolean fillContents) {
-        Word prototypeMarkWord = useBiasedLocking() ? loadWordFromWord(hub, prototypeMarkWordOffset()) : compileTimePrototypeMarkWord;
-        storeWord(memory, 0, markOffset(), prototypeMarkWord);
-        storeWord(memory, 0, hubOffset(), hub);
-        if (fillContents) {
-            if (size <= MAX_UNROLLED_OBJECT_ZEROING_SIZE) {
-                new_seqInit.inc();
-                explodeLoop();
-                for (int offset = 2 * wordSize(); offset < size; offset += wordSize()) {
-                    storeWord(memory, 0, offset, Word.zero());
-                }
-            } else {
-                new_loopInit.inc();
-                for (int offset = 2 * wordSize(); offset < size; offset += wordSize()) {
-                    storeWord(memory, 0, offset, Word.zero());
-                }
-            }
-        }
-    }
-
-    /**
-     * Formats some allocated memory with an object header and zeroes out the rest.
-     */
-    private static void formatArray(Word hub, int size, int length, int headerSize, Word memory, Word prototypeMarkWord, boolean fillContents) {
-        storeWord(memory, 0, markOffset(), prototypeMarkWord);
-        storeWord(memory, 0, hubOffset(), hub);
-        storeInt(memory, 0, arrayLengthOffset(), length);
-        if (fillContents) {
-            for (int offset = headerSize; offset < size; offset += wordSize()) {
-                storeWord(memory, 0, offset, Word.zero());
-            }
-        }
-    }
-
-    public static class Templates extends AbstractTemplates<NewObjectSnippets> {
-
-        private final ResolvedJavaMethod allocate;
-        private final ResolvedJavaMethod initializeObject;
-        private final ResolvedJavaMethod initializeObjectArray;
-        private final ResolvedJavaMethod initializePrimitiveArray;
-        private final ResolvedJavaMethod allocateArrayAndInitialize;
-        private final ResolvedJavaMethod newmultiarray;
-        private final TargetDescription target;
-        private final boolean useTLAB;
-
-        public Templates(CodeCacheProvider runtime, TargetDescription target, boolean useTLAB) {
-            super(runtime, target, NewObjectSnippets.class);
-            this.target = target;
-            this.useTLAB = useTLAB;
-            allocate = snippet("allocate", int.class);
-            initializeObject = snippet("initializeObject", Word.class, Word.class, Word.class, int.class, boolean.class, boolean.class);
-            initializeObjectArray = snippet("initializeObjectArray", Word.class, Word.class, int.class, int.class, Word.class, int.class, boolean.class, boolean.class);
-            initializePrimitiveArray = snippet("initializePrimitiveArray", Word.class, Word.class, int.class, int.class, Word.class, int.class, boolean.class, boolean.class);
-            allocateArrayAndInitialize = snippet("allocateArrayAndInitialize", int.class, int.class, int.class, int.class, ResolvedJavaType.class);
-            newmultiarray = snippet("newmultiarray", Word.class, int.class, int[].class);
-        }
-
-        /**
-         * Lowers a {@link NewInstanceNode}.
-         */
-        @SuppressWarnings("unused")
-        public void lower(NewInstanceNode newInstanceNode, LoweringTool tool) {
-            StructuredGraph graph = (StructuredGraph) newInstanceNode.graph();
-            HotSpotResolvedJavaType type = (HotSpotResolvedJavaType) newInstanceNode.instanceClass();
-            ConstantNode hub = ConstantNode.forConstant(type.klass(), runtime, graph);
-            int size = type.instanceSize();
-            assert (size % wordSize()) == 0;
-            assert size >= 0;
-
-            ValueNode memory;
-            if (!useTLAB) {
-                memory = ConstantNode.forConstant(new Constant(target.wordKind, 0L), runtime, graph);
-            } else {
-                ConstantNode sizeNode = ConstantNode.forInt(size, graph);
-                TLABAllocateNode tlabAllocateNode = graph.add(new TLABAllocateNode(sizeNode));
-                graph.addBeforeFixed(newInstanceNode, tlabAllocateNode);
-                memory = tlabAllocateNode;
-            }
-            InitializeObjectNode initializeNode = graph.add(new InitializeObjectNode(memory, type, newInstanceNode.fillContents(), newInstanceNode.locked()));
-            graph.replaceFixedWithFixed(newInstanceNode, initializeNode);
-        }
-
-        /**
-         * Lowers a {@link NewArrayNode}.
-         */
-        @SuppressWarnings("unused")
-        public void lower(NewArrayNode newArrayNode, LoweringTool tool) {
-            StructuredGraph graph = (StructuredGraph) newArrayNode.graph();
-            ValueNode lengthNode = newArrayNode.length();
-            TLABAllocateNode tlabAllocateNode;
-            ResolvedJavaType elementType = newArrayNode.elementType();
-            ResolvedJavaType arrayType = elementType.getArrayClass();
-            Kind elementKind = elementType.getKind();
-            final int alignment = target.wordSize;
-            final int headerSize = elementKind.getArrayBaseOffset();
-            final Integer length = lengthNode.isConstant() ? Integer.valueOf(lengthNode.asConstant().asInt()) : null;
-            int log2ElementSize = CodeUtil.log2(target.sizeInBytes(elementKind));
-            if (!useTLAB) {
-                ConstantNode zero = ConstantNode.forConstant(new Constant(target.wordKind, 0L), runtime, graph);
-                // value for 'size' doesn't matter as it isn't used since a stub call will be made anyway
-                // for both allocation and initialization - it just needs to be non-null
-                ConstantNode size = ConstantNode.forInt(-1, graph);
-                InitializeArrayNode initializeNode = graph.add(new InitializeArrayNode(zero, lengthNode, size, arrayType, newArrayNode.fillContents(), newArrayNode.locked()));
-                graph.replaceFixedWithFixed(newArrayNode, initializeNode);
-            } else if (length != null && belowThan(length, MAX_ARRAY_FAST_PATH_ALLOCATION_LENGTH)) {
-                // Calculate aligned size
-                int size = getArraySize(length, alignment, headerSize, log2ElementSize);
-                ConstantNode sizeNode = ConstantNode.forInt(size, graph);
-                tlabAllocateNode = graph.add(new TLABAllocateNode(sizeNode));
-                graph.addBeforeFixed(newArrayNode, tlabAllocateNode);
-                InitializeArrayNode initializeNode = graph.add(new InitializeArrayNode(tlabAllocateNode, lengthNode, sizeNode, arrayType, newArrayNode.fillContents(), newArrayNode.locked()));
-                graph.replaceFixedWithFixed(newArrayNode, initializeNode);
-            } else {
-                Key key = new Key(allocateArrayAndInitialize).
-                                add("alignment", alignment).
-                                add("headerSize", headerSize).
-                                add("log2ElementSize", log2ElementSize).
-                                add("type", arrayType);
-                Arguments arguments = new Arguments().add("length", lengthNode);
-                SnippetTemplate template = cache.get(key);
-                Debug.log("Lowering allocateArrayAndInitialize in %s: node=%s, template=%s, arguments=%s", graph, newArrayNode, template, arguments);
-                template.instantiate(runtime, newArrayNode, DEFAULT_REPLACER, arguments);
-            }
-        }
-
-        @SuppressWarnings("unused")
-        public void lower(TLABAllocateNode tlabAllocateNode, LoweringTool tool) {
-            StructuredGraph graph = (StructuredGraph) tlabAllocateNode.graph();
-            ValueNode size = tlabAllocateNode.size();
-            Key key = new Key(allocate);
-            Arguments arguments = arguments("size", size);
-            SnippetTemplate template = cache.get(key);
-            Debug.log("Lowering fastAllocate in %s: node=%s, template=%s, arguments=%s", graph, tlabAllocateNode, template, arguments);
-            template.instantiate(runtime, tlabAllocateNode, DEFAULT_REPLACER, arguments);
-        }
-
-        @SuppressWarnings("unused")
-        public void lower(InitializeObjectNode initializeNode, LoweringTool tool) {
-            StructuredGraph graph = (StructuredGraph) initializeNode.graph();
-            HotSpotResolvedJavaType type = (HotSpotResolvedJavaType) initializeNode.type();
-            assert !type.isArrayClass();
-            ConstantNode hub = ConstantNode.forConstant(type.klass(), runtime, graph);
-            int size = type.instanceSize();
-            assert (size % wordSize()) == 0;
-            assert size >= 0;
-            Key key = new Key(initializeObject).add("size", size).add("fillContents", initializeNode.fillContents()).add("locked", initializeNode.locked());
-            ValueNode memory = initializeNode.memory();
-            Arguments arguments = arguments("memory", memory).add("hub", hub).add("prototypeMarkWord", type.prototypeMarkWord());
-            SnippetTemplate template = cache.get(key);
-            Debug.log("Lowering initializeObject in %s: node=%s, template=%s, arguments=%s", graph, initializeNode, template, arguments);
-            template.instantiate(runtime, initializeNode, DEFAULT_REPLACER, arguments);
-        }
-
-        @SuppressWarnings("unused")
-        public void lower(InitializeArrayNode initializeNode, LoweringTool tool) {
-            StructuredGraph graph = (StructuredGraph) initializeNode.graph();
-            HotSpotResolvedJavaType type = (HotSpotResolvedJavaType) initializeNode.type();
-            ResolvedJavaType elementType = type.getComponentType();
-            assert elementType != null;
-            ConstantNode hub = ConstantNode.forConstant(type.klass(), runtime, graph);
-            Kind elementKind = elementType.getKind();
-            final int headerSize = elementKind.getArrayBaseOffset();
-            Key key = new Key(elementKind.isObject() ? initializeObjectArray : initializePrimitiveArray).add("headerSize", headerSize).add("fillContents", initializeNode.fillContents()).add("locked", initializeNode.locked());
-            ValueNode memory = initializeNode.memory();
-            Arguments arguments = arguments("memory", memory).add("hub", hub).add("prototypeMarkWord", type.prototypeMarkWord()).add("size", initializeNode.size()).add("length", initializeNode.length());
-            SnippetTemplate template = cache.get(key);
-            Debug.log("Lowering initializeObjectArray in %s: node=%s, template=%s, arguments=%s", graph, initializeNode, template, arguments);
-            template.instantiate(runtime, initializeNode, DEFAULT_REPLACER, arguments);
-        }
-
-        @SuppressWarnings("unused")
-        public void lower(NewMultiArrayNode newmultiarrayNode, LoweringTool tool) {
-            StructuredGraph graph = (StructuredGraph) newmultiarrayNode.graph();
-            int rank = newmultiarrayNode.dimensionCount();
-            ValueNode[] dims = new ValueNode[rank];
-            for (int i = 0; i < newmultiarrayNode.dimensionCount(); i++) {
-                dims[i] = newmultiarrayNode.dimension(i);
-            }
-            HotSpotResolvedJavaType type = (HotSpotResolvedJavaType) newmultiarrayNode.type();
-            ConstantNode hub = ConstantNode.forConstant(type.klass(), runtime, graph);
-            Key key = new Key(newmultiarray).add("dimensions", vargargs(new int[rank], StampFactory.forKind(Kind.Int))).add("rank", rank);
-            Arguments arguments = arguments("dimensions", dims).add("hub", hub);
-            SnippetTemplate template = cache.get(key);
-            template.instantiate(runtime, newmultiarrayNode, DEFAULT_REPLACER, arguments);
-        }
-    }
-
-    private static final SnippetCounter.Group countersNew = GraalOptions.SnippetCounters ? new SnippetCounter.Group("NewInstance") : null;
-    private static final SnippetCounter new_seqInit = new SnippetCounter(countersNew, "tlabSeqInit", "TLAB alloc with unrolled zeroing");
-    private static final SnippetCounter new_loopInit = new SnippetCounter(countersNew, "tlabLoopInit", "TLAB alloc with zeroing in a loop");
-    private static final SnippetCounter new_stub = new SnippetCounter(countersNew, "stub", "alloc and zeroing via stub");
-
-    private static final SnippetCounter.Group countersNewPrimitiveArray = GraalOptions.SnippetCounters ? new SnippetCounter.Group("NewPrimitiveArray") : null;
-    private static final SnippetCounter newarray_loopInit = new SnippetCounter(countersNewPrimitiveArray, "tlabLoopInit", "TLAB alloc with zeroing in a loop");
-    private static final SnippetCounter newarray_stub = new SnippetCounter(countersNewPrimitiveArray, "stub", "alloc and zeroing via stub");
-
-    private static final SnippetCounter.Group countersNewObjectArray = GraalOptions.SnippetCounters ? new SnippetCounter.Group("NewObjectArray") : null;
-    private static final SnippetCounter anewarray_loopInit = new SnippetCounter(countersNewObjectArray, "tlabLoopInit", "TLAB alloc with zeroing in a loop");
-    private static final SnippetCounter anewarray_stub = new SnippetCounter(countersNewObjectArray, "stub", "alloc and zeroing via stub");
-}
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.snippets;
+
+import static com.oracle.graal.api.code.UnsignedMath.*;
+import static com.oracle.graal.hotspot.snippets.HotSpotSnippetUtils.*;
+import static com.oracle.graal.nodes.extended.UnsafeArrayCastNode.*;
+import static com.oracle.graal.nodes.extended.UnsafeCastNode.*;
+import static com.oracle.graal.snippets.Snippet.Varargs.*;
+import static com.oracle.graal.snippets.SnippetTemplate.*;
+import static com.oracle.graal.snippets.SnippetTemplate.Arguments.*;
+import static com.oracle.graal.snippets.nodes.DirectObjectStoreNode.*;
+import static com.oracle.graal.snippets.nodes.ExplodeLoopNode.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.hotspot.nodes.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.java.*;
+import com.oracle.graal.nodes.spi.*;
+import com.oracle.graal.nodes.type.*;
+import com.oracle.graal.phases.*;
+import com.oracle.graal.snippets.*;
+import com.oracle.graal.snippets.Snippet.ConstantParameter;
+import com.oracle.graal.snippets.Snippet.Parameter;
+import com.oracle.graal.snippets.Snippet.VarargsParameter;
+import com.oracle.graal.snippets.SnippetTemplate.AbstractTemplates;
+import com.oracle.graal.snippets.SnippetTemplate.Arguments;
+import com.oracle.graal.snippets.SnippetTemplate.Key;
+import com.oracle.graal.snippets.nodes.*;
+
+/**
+ * Snippets used for implementing NEW, ANEWARRAY and NEWARRAY.
+ */
+public class NewObjectSnippets implements SnippetsInterface {
+
+    @Snippet
+    public static Word allocate(@Parameter("size") int size) {
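+        // TLAB bump-pointer fast path: advance the thread-local top when there is room; returning Word.zero() sends the caller to the stub slow path.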
+        Word thread = thread();
+        Word top = loadWordFromWord(thread, threadTlabTopOffset());
+        Word end = loadWordFromWord(thread, threadTlabEndOffset());
+        Word available = end.minus(top);
+        if (available.aboveOrEqual(Word.fromInt(size))) {
+            Word newTop = top.plus(size);
+            storeObject(thread, 0, threadTlabTopOffset(), newTop);
+            return top;
+        }
+        return Word.zero();
+    }
+
+    @Snippet
+    public static Object initializeObject(
+                    @Parameter("memory") Word memory,
+                    @Parameter("hub") Word hub,
+                    @Parameter("prototypeMarkWord") Word prototypeMarkWord,
+                    @ConstantParameter("size") int size,
+                    @ConstantParameter("fillContents") boolean fillContents,
+                    @ConstantParameter("locked") boolean locked) {
+
+        Object result;
+        if (memory == Word.zero()) {
+            new_stub.inc();
+            result = NewInstanceStubCall.call(hub);
+        } else {
+            if (locked) {
+                formatObject(hub, size, memory, thread().or(biasedLockPattern()), fillContents);
+            } else {
+                formatObject(hub, size, memory, prototypeMarkWord, fillContents);
+            }
+            result = memory.toObject();
+        }
+        return unsafeCast(verifyOop(result), StampFactory.forNodeIntrinsic());
+    }
+
+    @Snippet
+    public static Object initializeObjectArray(
+                    @Parameter("memory") Word memory,
+                    @Parameter("hub") Word hub,
+                    @Parameter("length") int length,
+                    @Parameter("size") int size,
+                    @Parameter("prototypeMarkWord") Word prototypeMarkWord,
+                    @ConstantParameter("headerSize") int headerSize,
+                    @ConstantParameter("fillContents") boolean fillContents,
+                    @ConstantParameter("locked") boolean locked) {
+        if (locked) {
+            return initializeArray(memory, hub, length, size, thread().or(biasedLockPattern()), headerSize, true, fillContents);
+        } else {
+            return initializeArray(memory, hub, length, size, prototypeMarkWord, headerSize, true, fillContents);
+        }
+    }
+
+    @Snippet
+    public static Object initializePrimitiveArray(
+                    @Parameter("memory") Word memory,
+                    @Parameter("hub") Word hub,
+                    @Parameter("length") int length,
+                    @Parameter("size") int size,
+                    @Parameter("prototypeMarkWord") Word prototypeMarkWord,
+                    @ConstantParameter("headerSize") int headerSize,
+                    @ConstantParameter("fillContents") boolean fillContents,
+                    @ConstantParameter("locked") boolean locked) {
+        if (locked) {
+            return initializeArray(memory, hub, length, size, thread().or(biasedLockPattern()), headerSize, false, fillContents);
+        } else {
+            return initializeArray(memory, hub, length, size, prototypeMarkWord, headerSize, false, fillContents);
+        }
+    }
+
+    private static Object initializeArray(Word memory, Word hub, int length, int size, Word prototypeMarkWord, int headerSize, boolean isObjectArray, boolean fillContents) {
+        Object result;
+        if (memory == Word.zero()) {
+            if (isObjectArray) {
+                anewarray_stub.inc();
+            } else {
+                newarray_stub.inc();
+            }
+            result = NewArrayStubCall.call(isObjectArray, hub, length);
+        } else {
+            if (isObjectArray) {
+                anewarray_loopInit.inc();
+            } else {
+                newarray_loopInit.inc();
+            }
+            formatArray(hub, size, length, headerSize, memory, prototypeMarkWord, fillContents);
+            result = memory.toObject();
+        }
+        return unsafeArrayCast(verifyOop(result), length, StampFactory.forNodeIntrinsic());
+    }
+
+    /**
+     * Maximum array length for which fast path allocation is used.
+     */
+    private static final int MAX_ARRAY_FAST_PATH_ALLOCATION_LENGTH = 0x00FFFFFF;
+
+    @Snippet
+    public static Object allocateArrayAndInitialize(
+                    @Parameter("length") int length,
+                    @ConstantParameter("alignment") int alignment,
+                    @ConstantParameter("headerSize") int headerSize,
+                    @ConstantParameter("log2ElementSize") int log2ElementSize,
+                    @ConstantParameter("type") ResolvedJavaType type) {
+        if (!belowThan(length, MAX_ARRAY_FAST_PATH_ALLOCATION_LENGTH)) {
+            // This handles both negative array sizes and very large array sizes
+            DeoptimizeNode.deopt(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.RuntimeConstraint);
+        }
+        int size = getArraySize(length, alignment, headerSize, log2ElementSize);
+        Word memory = TLABAllocateNode.allocateVariableSize(size);
+        return InitializeArrayNode.initialize(memory, length, size, type, true, false);
+    }
+
+    public static int getArraySize(int length, int alignment, int headerSize, int log2ElementSize) {
+        int size = (length << log2ElementSize) + headerSize + (alignment - 1);
+        int mask = ~(alignment - 1);
+        return size & mask;
+    }
+
+    /**
+     * Calls the runtime stub for implementing MULTIANEWARRAY.
+     */
+    @Snippet
+    public static Object newmultiarray(
+                    @Parameter("hub") Word hub,
+                    @ConstantParameter("rank") int rank,
+                    @VarargsParameter("dimensions") int[] dimensions) {
+        Word dims = DimensionsNode.allocaDimsArray(rank);
+        ExplodeLoopNode.explodeLoop();
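+        // 'rank' is a constant parameter, so this store loop is fully unrolled when the snippet is instantiated.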
+        for (int i = 0; i < rank; i++) {
+            DirectObjectStoreNode.storeInt(dims, 0, i * 4, dimensions[i]);
+        }
+        return NewMultiArrayStubCall.call(hub, rank, dims);
+    }
+
+    /**
+     * Maximum size of an object whose body is initialized by a sequence of
+     * zero-stores to its fields. Larger objects have their bodies initialized
+     * in a loop.
+     */
+    private static final int MAX_UNROLLED_OBJECT_ZEROING_SIZE = 10 * wordSize();
+
+    /**
+     * Formats some allocated memory with an object header and zeroes out the rest.
+     */
+    private static void formatObject(Word hub, int size, Word memory, Word compileTimePrototypeMarkWord, boolean fillContents) {
+        Word prototypeMarkWord = useBiasedLocking() ? loadWordFromWord(hub, prototypeMarkWordOffset()) : compileTimePrototypeMarkWord;
+        storeWord(memory, 0, markOffset(), prototypeMarkWord);
+        storeWord(memory, 0, hubOffset(), hub);
+        if (fillContents) {
+            if (size <= MAX_UNROLLED_OBJECT_ZEROING_SIZE) {
+                new_seqInit.inc();
+                explodeLoop();
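+                // 'size' is a compile-time constant here, so this zeroing loop unrolls into straight-line stores.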
+                for (int offset = 2 * wordSize(); offset < size; offset += wordSize()) {
+                    storeWord(memory, 0, offset, Word.zero());
+                }
+            } else {
+                new_loopInit.inc();
+                for (int offset = 2 * wordSize(); offset < size; offset += wordSize()) {
+                    storeWord(memory, 0, offset, Word.zero());
+                }
+            }
+        }
+    }
+
+    /**
+     * Formats some allocated memory with an object header and zeroes out the rest.
+     */
+    private static void formatArray(Word hub, int size, int length, int headerSize, Word memory, Word prototypeMarkWord, boolean fillContents) {
+        storeWord(memory, 0, markOffset(), prototypeMarkWord);
+        storeWord(memory, 0, hubOffset(), hub);
+        storeInt(memory, 0, arrayLengthOffset(), length);
+        if (fillContents) {
+            for (int offset = headerSize; offset < size; offset += wordSize()) {
+                storeWord(memory, 0, offset, Word.zero());
+            }
+        }
+    }
+
+    public static class Templates extends AbstractTemplates<NewObjectSnippets> {
+
+        private final ResolvedJavaMethod allocate;
+        private final ResolvedJavaMethod initializeObject;
+        private final ResolvedJavaMethod initializeObjectArray;
+        private final ResolvedJavaMethod initializePrimitiveArray;
+        private final ResolvedJavaMethod allocateArrayAndInitialize;
+        private final ResolvedJavaMethod newmultiarray;
+        private final TargetDescription target;
+        private final boolean useTLAB;
+
+        public Templates(CodeCacheProvider runtime, Assumptions assumptions, TargetDescription target, boolean useTLAB) {
+            super(runtime, assumptions, target, NewObjectSnippets.class);
+            this.target = target;
+            this.useTLAB = useTLAB;
+            allocate = snippet("allocate", int.class);
+            initializeObject = snippet("initializeObject", Word.class, Word.class, Word.class, int.class, boolean.class, boolean.class);
+            initializeObjectArray = snippet("initializeObjectArray", Word.class, Word.class, int.class, int.class, Word.class, int.class, boolean.class, boolean.class);
+            initializePrimitiveArray = snippet("initializePrimitiveArray", Word.class, Word.class, int.class, int.class, Word.class, int.class, boolean.class, boolean.class);
+            allocateArrayAndInitialize = snippet("allocateArrayAndInitialize", int.class, int.class, int.class, int.class, ResolvedJavaType.class);
+            newmultiarray = snippet("newmultiarray", Word.class, int.class, int[].class);
+        }
+
+        /**
+         * Lowers a {@link NewInstanceNode}.
+         */
+        @SuppressWarnings("unused")
+        public void lower(NewInstanceNode newInstanceNode, LoweringTool tool) {
+            StructuredGraph graph = (StructuredGraph) newInstanceNode.graph();
+            HotSpotResolvedJavaType type = (HotSpotResolvedJavaType) newInstanceNode.instanceClass();
+            ConstantNode hub = ConstantNode.forConstant(type.klass(), runtime, graph);
+            int size = type.instanceSize();
+            assert (size % wordSize()) == 0;
+            assert size >= 0;
+
+            ValueNode memory;
+            if (!useTLAB) {
+                memory = ConstantNode.forConstant(new Constant(target.wordKind, 0L), runtime, graph);
+            } else {
+                ConstantNode sizeNode = ConstantNode.forInt(size, graph);
+                TLABAllocateNode tlabAllocateNode = graph.add(new TLABAllocateNode(sizeNode));
+                graph.addBeforeFixed(newInstanceNode, tlabAllocateNode);
+                memory = tlabAllocateNode;
+            }
+            InitializeObjectNode initializeNode = graph.add(new InitializeObjectNode(memory, type, newInstanceNode.fillContents(), newInstanceNode.locked()));
+            graph.replaceFixedWithFixed(newInstanceNode, initializeNode);
+        }
+
+        /**
+         * Lowers a {@link NewArrayNode}.
+         */
+        @SuppressWarnings("unused")
+        public void lower(NewArrayNode newArrayNode, LoweringTool tool) {
+            StructuredGraph graph = (StructuredGraph) newArrayNode.graph();
+            ValueNode lengthNode = newArrayNode.length();
+            TLABAllocateNode tlabAllocateNode;
+            ResolvedJavaType elementType = newArrayNode.elementType();
+            ResolvedJavaType arrayType = elementType.getArrayClass();
+            Kind elementKind = elementType.getKind();
+            final int alignment = target.wordSize;
+            final int headerSize = elementKind.getArrayBaseOffset();
+            final Integer length = lengthNode.isConstant() ? Integer.valueOf(lengthNode.asConstant().asInt()) : null;
+            int log2ElementSize = CodeUtil.log2(target.sizeInBytes(elementKind));
+            if (!useTLAB) {
+                ConstantNode zero = ConstantNode.forConstant(new Constant(target.wordKind, 0L), runtime, graph);
+                // The value for 'size' doesn't matter here: a stub call handles both allocation
+                // and initialization, so the node just needs to be non-null.
+                ConstantNode size = ConstantNode.forInt(-1, graph);
+                InitializeArrayNode initializeNode = graph.add(new InitializeArrayNode(zero, lengthNode, size, arrayType, newArrayNode.fillContents(), newArrayNode.locked()));
+                graph.replaceFixedWithFixed(newArrayNode, initializeNode);
+            } else if (length != null && belowThan(length, MAX_ARRAY_FAST_PATH_ALLOCATION_LENGTH)) {
+                // Calculate aligned size
+                int size = getArraySize(length, alignment, headerSize, log2ElementSize);
+                ConstantNode sizeNode = ConstantNode.forInt(size, graph);
+                tlabAllocateNode = graph.add(new TLABAllocateNode(sizeNode));
+                graph.addBeforeFixed(newArrayNode, tlabAllocateNode);
+                InitializeArrayNode initializeNode = graph.add(new InitializeArrayNode(tlabAllocateNode, lengthNode, sizeNode, arrayType, newArrayNode.fillContents(), newArrayNode.locked()));
+                graph.replaceFixedWithFixed(newArrayNode, initializeNode);
+            } else {
+                Key key = new Key(allocateArrayAndInitialize).
+                                add("alignment", alignment).
+                                add("headerSize", headerSize).
+                                add("log2ElementSize", log2ElementSize).
+                                add("type", arrayType);
+                Arguments arguments = new Arguments().add("length", lengthNode);
+                SnippetTemplate template = cache.get(key, assumptions);
+                Debug.log("Lowering allocateArrayAndInitialize in %s: node=%s, template=%s, arguments=%s", graph, newArrayNode, template, arguments);
+                template.instantiate(runtime, newArrayNode, DEFAULT_REPLACER, arguments);
+            }
+        }
+
+        @SuppressWarnings("unused")
+        public void lower(TLABAllocateNode tlabAllocateNode, LoweringTool tool) {
+            StructuredGraph graph = (StructuredGraph) tlabAllocateNode.graph();
+            ValueNode size = tlabAllocateNode.size();
+            Key key = new Key(allocate);
+            Arguments arguments = arguments("size", size);
+            SnippetTemplate template = cache.get(key, assumptions);
+            Debug.log("Lowering fastAllocate in %s: node=%s, template=%s, arguments=%s", graph, tlabAllocateNode, template, arguments);
+            template.instantiate(runtime, tlabAllocateNode, DEFAULT_REPLACER, arguments);
+        }
+
+        @SuppressWarnings("unused")
+        public void lower(InitializeObjectNode initializeNode, LoweringTool tool) {
+            StructuredGraph graph = (StructuredGraph) initializeNode.graph();
+            HotSpotResolvedJavaType type = (HotSpotResolvedJavaType) initializeNode.type();
+            assert !type.isArrayClass();
+            ConstantNode hub = ConstantNode.forConstant(type.klass(), runtime, graph);
+            int size = type.instanceSize();
+            assert (size % wordSize()) == 0;
+            assert size >= 0;
+            Key key = new Key(initializeObject).add("size", size).add("fillContents", initializeNode.fillContents()).add("locked", initializeNode.locked());
+            ValueNode memory = initializeNode.memory();
+            Arguments arguments = arguments("memory", memory).add("hub", hub).add("prototypeMarkWord", type.prototypeMarkWord());
+            SnippetTemplate template = cache.get(key, assumptions);
+            Debug.log("Lowering initializeObject in %s: node=%s, template=%s, arguments=%s", graph, initializeNode, template, arguments);
+            template.instantiate(runtime, initializeNode, DEFAULT_REPLACER, arguments);
+        }
+
+        @SuppressWarnings("unused")
+        public void lower(InitializeArrayNode initializeNode, LoweringTool tool) {
+            StructuredGraph graph = (StructuredGraph) initializeNode.graph();
+            HotSpotResolvedJavaType type = (HotSpotResolvedJavaType) initializeNode.type();
+            ResolvedJavaType elementType = type.getComponentType();
+            assert elementType != null;
+            ConstantNode hub = ConstantNode.forConstant(type.klass(), runtime, graph);
+            Kind elementKind = elementType.getKind();
+            final int headerSize = elementKind.getArrayBaseOffset();
+            Key key = new Key(elementKind.isObject() ? initializeObjectArray : initializePrimitiveArray).add("headerSize", headerSize).add("fillContents", initializeNode.fillContents()).add("locked", initializeNode.locked());
+            ValueNode memory = initializeNode.memory();
+            Arguments arguments = arguments("memory", memory).add("hub", hub).add("prototypeMarkWord", type.prototypeMarkWord()).add("size", initializeNode.size()).add("length", initializeNode.length());
+            SnippetTemplate template = cache.get(key, assumptions);
+            Debug.log("Lowering initializeObjectArray in %s: node=%s, template=%s, arguments=%s", graph, initializeNode, template, arguments);
+            template.instantiate(runtime, initializeNode, DEFAULT_REPLACER, arguments);
+        }
+
+        @SuppressWarnings("unused")
+        public void lower(NewMultiArrayNode newmultiarrayNode, LoweringTool tool) {
+            StructuredGraph graph = (StructuredGraph) newmultiarrayNode.graph();
+            int rank = newmultiarrayNode.dimensionCount();
+            ValueNode[] dims = new ValueNode[rank];
+            for (int i = 0; i < newmultiarrayNode.dimensionCount(); i++) {
+                dims[i] = newmultiarrayNode.dimension(i);
+            }
+            HotSpotResolvedJavaType type = (HotSpotResolvedJavaType) newmultiarrayNode.type();
+            ConstantNode hub = ConstantNode.forConstant(type.klass(), runtime, graph);
+            Key key = new Key(newmultiarray).add("dimensions", vargargs(new int[rank], StampFactory.forKind(Kind.Int))).add("rank", rank);
+            Arguments arguments = arguments("dimensions", dims).add("hub", hub);
+            SnippetTemplate template = cache.get(key, assumptions);
+            template.instantiate(runtime, newmultiarrayNode, DEFAULT_REPLACER, arguments);
+        }
+    }
+
+    private static final SnippetCounter.Group countersNew = GraalOptions.SnippetCounters ? new SnippetCounter.Group("NewInstance") : null;
+    private static final SnippetCounter new_seqInit = new SnippetCounter(countersNew, "tlabSeqInit", "TLAB alloc with unrolled zeroing");
+    private static final SnippetCounter new_loopInit = new SnippetCounter(countersNew, "tlabLoopInit", "TLAB alloc with zeroing in a loop");
+    private static final SnippetCounter new_stub = new SnippetCounter(countersNew, "stub", "alloc and zeroing via stub");
+
+    private static final SnippetCounter.Group countersNewPrimitiveArray = GraalOptions.SnippetCounters ? new SnippetCounter.Group("NewPrimitiveArray") : null;
+    private static final SnippetCounter newarray_loopInit = new SnippetCounter(countersNewPrimitiveArray, "tlabLoopInit", "TLAB alloc with zeroing in a loop");
+    private static final SnippetCounter newarray_stub = new SnippetCounter(countersNewPrimitiveArray, "stub", "alloc and zeroing via stub");
+
+    private static final SnippetCounter.Group countersNewObjectArray = GraalOptions.SnippetCounters ? new SnippetCounter.Group("NewObjectArray") : null;
+    private static final SnippetCounter anewarray_loopInit = new SnippetCounter(countersNewObjectArray, "tlabLoopInit", "TLAB alloc with zeroing in a loop");
+    private static final SnippetCounter anewarray_stub = new SnippetCounter(countersNewObjectArray, "stub", "alloc and zeroing via stub");
+}
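
Every lower(...) method above follows the same split: values that are compile-time constants for a given node (size, fillContents, locked, rank) go into the cache Key and specialize a snippet template, while graph values (memory, hub, dimensions) are passed as Arguments and bound only when the template is instantiated at the node being lowered. A minimal, hypothetical sketch of that caching scheme in plain Java (none of these names are Graal API):

    import java.util.*;

    final class SnippetLoweringSketch {
        private static final Map<List<Object>, String> cache = new HashMap<>();

        // Constants form (and key) the template; runtime values are only
        // bound at instantiation time.
        static String lower(String snippet, List<Object> constants, List<Object> runtimeValues) {
            List<Object> key = new ArrayList<>();
            key.add(snippet);
            key.addAll(constants);
            String template = cache.computeIfAbsent(key, k -> "template" + k);
            return template + " instantiated with " + runtimeValues;
        }
    }

Two initializeObject sites with the same constant size and flags therefore share one cached template and differ only in the bound arguments.
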
--- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java	Tue Nov 27 12:12:02 2012 +0100
@@ -1086,32 +1086,60 @@
         ValueNode value = frameState.ipop();
 
         int nofCases = bs.numberOfCases();
+        double[] keyProbabilities = switchProbability(nofCases + 1, bci);
 
-        Map<Integer, Integer> bciToSuccessorIndex = new HashMap<>();
-        int successorCount = currentBlock.successors.size();
-        for (int i = 0; i < successorCount; i++) {
-            assert !bciToSuccessorIndex.containsKey(currentBlock.successors.get(i).startBci);
-            if (!bciToSuccessorIndex.containsKey(currentBlock.successors.get(i).startBci)) {
-                bciToSuccessorIndex.put(currentBlock.successors.get(i).startBci, i);
+        Map<Integer, SuccessorInfo> bciToBlockSuccessorIndex = new HashMap<>();
+        for (int i = 0; i < currentBlock.successors.size(); i++) {
+            assert !bciToBlockSuccessorIndex.containsKey(currentBlock.successors.get(i).startBci);
+            if (!bciToBlockSuccessorIndex.containsKey(currentBlock.successors.get(i).startBci)) {
+                bciToBlockSuccessorIndex.put(currentBlock.successors.get(i).startBci, new SuccessorInfo(i));
             }
         }
 
-        double[] keyProbabilities = switchProbability(nofCases + 1, bci);
-
+        ArrayList<Block> actualSuccessors = new ArrayList<>();
         int[] keys = new int[nofCases];
         int[] keySuccessors = new int[nofCases + 1];
-        for (int i = 0; i < nofCases; i++) {
-            keys[i] = bs.keyAt(i);
-            keySuccessors[i] = bciToSuccessorIndex.get(bs.targetAt(i));
-        }
-        keySuccessors[nofCases] = bciToSuccessorIndex.get(bs.defaultTarget());
+        int deoptSuccessorIndex = -1;
+        int nextSuccessorIndex = 0;
+        for (int i = 0; i < nofCases + 1; i++) {
+            if (i < nofCases) {
+                keys[i] = bs.keyAt(i);
+            }
 
-        double[] successorProbabilities = IntegerSwitchNode.successorProbabilites(successorCount, keySuccessors, keyProbabilities);
-        IntegerSwitchNode lookupSwitch = currentGraph.add(new IntegerSwitchNode(value, successorCount, keys, keyProbabilities, keySuccessors));
-        for (int i = 0; i < successorCount; i++) {
-            lookupSwitch.setBlockSuccessor(i, createBlockTarget(successorProbabilities[i], currentBlock.successors.get(i), frameState));
+            if (isNeverExecutedCode(keyProbabilities[i])) {
+                if (deoptSuccessorIndex < 0) {
+                    deoptSuccessorIndex = nextSuccessorIndex++;
+                    actualSuccessors.add(null);
+                }
+                keySuccessors[i] = deoptSuccessorIndex;
+            } else {
+                int targetBci = i >= nofCases ? bs.defaultTarget() : bs.targetAt(i);
+                SuccessorInfo info = bciToBlockSuccessorIndex.get(targetBci);
+                if (info.actualIndex < 0) {
+                    info.actualIndex = nextSuccessorIndex++;
+                    actualSuccessors.add(currentBlock.successors.get(info.blockIndex));
+                }
+                keySuccessors[i] = info.actualIndex;
+            }
         }
-        append(lookupSwitch);
+
+        double[] successorProbabilities = IntegerSwitchNode.successorProbabilites(actualSuccessors.size(), keySuccessors, keyProbabilities);
+        IntegerSwitchNode switchNode = currentGraph.add(new IntegerSwitchNode(value, actualSuccessors.size(), keys, keyProbabilities, keySuccessors));
+        for (int i = 0; i < actualSuccessors.size(); i++) {
+            switchNode.setBlockSuccessor(i, createBlockTarget(successorProbabilities[i], actualSuccessors.get(i), frameState));
+        }
+
+        append(switchNode);
+    }
+
+    private static class SuccessorInfo {
+        int blockIndex;
+        int actualIndex;
+
+        public SuccessorInfo(int blockSuccessorIndex) {
+            this.blockIndex = blockSuccessorIndex;
+            actualIndex = -1;
+        }
     }
 
     private ConstantNode appendConstant(Constant constant) {
@@ -1204,13 +1232,18 @@
 
     private FixedNode createTarget(double probability, Block block, FrameStateBuilder stateAfter) {
         assert probability >= 0 && probability <= 1.01 : probability;
-        if (probability == 0 && optimisticOpts.removeNeverExecutedCode() && entryBCI == StructuredGraph.INVOCATION_ENTRY_BCI) {
+        if (isNeverExecutedCode(probability)) {
             return currentGraph.add(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.UnreachedCode, graphId));
         } else {
+            assert block != null;
             return createTarget(block, stateAfter);
         }
     }
 
+    private boolean isNeverExecutedCode(double probability) {
+        return probability == 0 && optimisticOpts.removeNeverExecutedCode() && entryBCI == StructuredGraph.INVOCATION_ENTRY_BCI;
+    }
+
     private FixedNode createTarget(Block block, FrameStateBuilder state) {
         assert block != null && state != null;
         assert !block.isExceptionEntry || state.stackSize() == 1;
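
The rewritten switch construction changes the successor layout: previously every block successor got its own slot in the IntegerSwitchNode, while now keys whose profiled probability is zero are funneled into one shared deoptimization successor, and only reachable blocks are materialized, in first-use order. A self-contained sketch of that compaction, with block targets reduced to integers and all names hypothetical:

    import java.util.*;

    final class SwitchCompactionSketch {
        // Returns the key->successor mapping; never-executed keys share a single
        // synthetic deopt successor (encoded here as a null entry in 'successors').
        static int[] compact(int[] targetBlock, double[] keyProbabilities, List<Integer> successors) {
            Map<Integer, Integer> blockToIndex = new HashMap<>();
            int[] keySuccessors = new int[targetBlock.length];
            int deoptIndex = -1;
            for (int i = 0; i < targetBlock.length; i++) {
                if (keyProbabilities[i] == 0) {          // profiled as never executed
                    if (deoptIndex < 0) {
                        deoptIndex = successors.size();
                        successors.add(null);            // placeholder for a DeoptimizeNode
                    }
                    keySuccessors[i] = deoptIndex;
                } else {
                    Integer idx = blockToIndex.get(targetBlock[i]);
                    if (idx == null) {
                        idx = successors.size();
                        successors.add(targetBlock[i]);
                        blockToIndex.put(targetBlock[i], idx);
                    }
                    keySuccessors[i] = idx;
                }
            }
            return keySuccessors;
        }
    }

However many keys are profiled as never executed, they all map to the single deopt slot, so the switch shrinks to the successors that can actually be reached.
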
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java	Tue Nov 27 12:12:02 2012 +0100
@@ -31,8 +31,8 @@
 import com.oracle.graal.api.code.CompilationResult.JumpTable;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.LIRInstruction.Opcode;
@@ -215,29 +215,33 @@
 
         @Override
         public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
+            assert isSorted(lowKeys) && isSorted(highKeys);
+
+            Label actualDefaultTarget = defaultTarget == null ? new Label() : defaultTarget.label();
+            int prevHighKey = 0;
+            boolean skipLowCheck = false;
             for (int i = 0; i < lowKeys.length; i++) {
                 int lowKey = lowKeys[i];
                 int highKey = highKeys[i];
                 if (lowKey == highKey) {
                     masm.cmpl(asIntReg(key), lowKey);
                     masm.jcc(ConditionFlag.equal, keyTargets[i].label());
-                } else if (lowKey + 1 == highKey) {
-                    masm.cmpl(asIntReg(key), lowKey);
-                    masm.jcc(ConditionFlag.equal, keyTargets[i].label());
-                    masm.cmpl(asIntReg(key), highKey);
-                    masm.jcc(ConditionFlag.equal, keyTargets[i].label());
+                    skipLowCheck = false;
                 } else {
-                    Label skip = new Label();
-                    masm.cmpl(asIntReg(key), lowKey);
-                    masm.jcc(ConditionFlag.less, skip);
+                    if (!skipLowCheck || (prevHighKey + 1) != lowKey) {
+                        masm.cmpl(asIntReg(key), lowKey);
+                        masm.jcc(ConditionFlag.less, actualDefaultTarget);
+                    }
                     masm.cmpl(asIntReg(key), highKey);
                     masm.jcc(ConditionFlag.lessEqual, keyTargets[i].label());
-                    masm.bind(skip);
+                    skipLowCheck = true;
                 }
+                prevHighKey = highKey;
             }
             if (defaultTarget != null) {
                 masm.jmp(defaultTarget.label());
             } else {
+                masm.bind(actualDefaultTarget);
                 masm.hlt();
             }
         }
@@ -259,6 +263,15 @@
         public void setFallThroughTarget(LabelRef target) {
             defaultTarget = target;
         }
+
+        private static boolean isSorted(int[] values) {
+            for (int i = 1; i < values.length; i++) {
+                if (values[i - 1] >= values[i]) {
+                    return false;
+                }
+            }
+            return true;
+        }
     }
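
The rewritten emission loop exploits the sorted-ranges invariant (checked by isSorted): once a range [lowKey, highKey] has been tested, the next range's lower-bound compare can be skipped when the ranges are contiguous, because any key below the next lowKey was already dispatched or sent to the default target. A hypothetical rendering of the idea with assembler calls replaced by strings:

    import java.util.*;

    final class RangeSwitchSketch {
        // low/high must be sorted and non-overlapping, as asserted above.
        static List<String> emit(int[] low, int[] high) {
            List<String> code = new ArrayList<>();
            int prevHigh = 0;
            boolean skipLowCheck = false;
            for (int i = 0; i < low.length; i++) {
                if (low[i] == high[i]) {
                    code.add("cmp key, " + low[i] + " ; je target" + i);
                    skipLowCheck = false;
                } else {
                    // The lower bound is provably satisfied when the previous
                    // range ended exactly one below this one.
                    if (!skipLowCheck || prevHigh + 1 != low[i]) {
                        code.add("cmp key, " + low[i] + " ; jl default");
                    }
                    code.add("cmp key, " + high[i] + " ; jle target" + i);
                    skipLowCheck = true;
                }
                prevHigh = high[i];
            }
            code.add("jmp default");
            return code;
        }
    }

For contiguous ranges such as 1-3, 4-6, 7-9 this saves one compare per range after the first on the fall-through path.
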
 
 
--- a/graal/com.oracle.graal.loop/src/com/oracle/graal/loop/LoopTransformations.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.loop/src/com/oracle/graal/loop/LoopTransformations.java	Tue Nov 27 12:12:02 2012 +0100
@@ -77,7 +77,7 @@
         loop.inside().duplicate().insertBefore(loop);
     }
 
-    public static void fullUnroll(LoopEx loop, MetaAccessProvider runtime) {
+    public static void fullUnroll(LoopEx loop, MetaAccessProvider runtime, Assumptions assumptions) {
         //assert loop.isCounted(); //TODO (gd) strengthen: counted with known trip count
         int iterations = 0;
         LoopBeginNode loopBegin = loop.loopBegin();
@@ -85,7 +85,7 @@
         while (!loopBegin.isDeleted()) {
             int mark = graph.getMark();
             peel(loop);
-            new CanonicalizerPhase(null, runtime, null, mark, null).apply(graph);
+            new CanonicalizerPhase(null, runtime, assumptions, mark, null).apply(graph);
             if (iterations++ > UNROLL_LIMIT || graph.getNodeCount() > GraalOptions.MaximumDesiredSize * 3) {
                 throw new BailoutException("FullUnroll : Graph seems to grow out of proportion");
             }
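
fullUnroll now threads the real Assumptions into the canonicalizer instead of null, so folding done while peeling can record optimistic facts. The overall driver is peel-and-canonicalize until the loop header is deleted, with a growth bailout; a self-contained, purely illustrative model where a trip counter stands in for the loop graph:

    final class FullUnrollSketch {
        static final int UNROLL_LIMIT = 100;      // hypothetical budget
        static final int SIZE_BUDGET = 20_000;    // hypothetical node-count cap

        // 'tripCount > 0' stands in for !loopBegin.isDeleted(): once every
        // iteration is peeled and the exit test folds, the loop disappears.
        static int fullUnroll(int tripCount, int bodySize) {
            int iterations = 0;
            int nodeCount = bodySize;
            while (tripCount > 0) {
                tripCount--;              // peel(loop)
                nodeCount += bodySize;    // the graph grows by one loop body
                // canonicalization of the newly peeled nodes would run here
                if (iterations++ > UNROLL_LIMIT || nodeCount > SIZE_BUDGET) {
                    throw new IllegalStateException("FullUnroll: graph grows out of proportion");
                }
            }
            return nodeCount;
        }
    }
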
--- a/graal/com.oracle.graal.loop/src/com/oracle/graal/loop/phases/LoopFullUnrollPhase.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.loop/src/com/oracle/graal/loop/phases/LoopFullUnrollPhase.java	Tue Nov 27 12:12:02 2012 +0100
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.loop.phases;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.loop.*;
 import com.oracle.graal.nodes.*;
@@ -32,9 +33,11 @@
 public class LoopFullUnrollPhase extends Phase {
     private static final DebugMetric FULLY_UNROLLED_LOOPS = Debug.metric("FullUnrolls");
     private final GraalCodeCacheProvider runtime;
+    private final Assumptions assumptions;
 
-    public LoopFullUnrollPhase(GraalCodeCacheProvider runtime) {
+    public LoopFullUnrollPhase(GraalCodeCacheProvider runtime, Assumptions assumptions) {
         this.runtime = runtime;
+        this.assumptions = assumptions;
     }
 
     @Override
@@ -48,7 +51,7 @@
                 for (LoopEx loop : dataCounted.countedLoops()) {
                     if (LoopPolicies.shouldFullUnroll(loop)) {
                         Debug.log("FullUnroll %s", loop);
-                        LoopTransformations.fullUnroll(loop, runtime);
+                        LoopTransformations.fullUnroll(loop, runtime, assumptions);
                         FULLY_UNROLLED_LOOPS.increment();
                         Debug.dump(graph, "After fullUnroll %s", loop);
                         peeled = true;
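
The LoopFullUnrollPhase change is pure plumbing: the phase is constructed with the compilation's Assumptions and forwards them unchanged to LoopTransformations.fullUnroll. A reduced, hypothetical shape of that wiring:

    final class UnrollPhaseSketch {
        interface Assumptions { /* marker */ }
        interface Loop { boolean shouldFullUnroll(); }

        private final Assumptions assumptions;

        UnrollPhaseSketch(Assumptions assumptions) {
            this.assumptions = assumptions;
        }

        void run(Iterable<Loop> countedLoops) {
            for (Loop loop : countedLoops) {
                if (loop.shouldFullUnroll()) {
                    fullUnroll(loop, assumptions); // assumptions now reach canonicalization
                }
            }
        }

        private void fullUnroll(Loop loop, Assumptions a) {
            // peel/canonicalize elided; see the FullUnrollSketch above
        }
    }
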
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/LoadHubNode.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/LoadHubNode.java	Tue Nov 27 12:12:02 2012 +0100
@@ -58,7 +58,7 @@
             ResolvedJavaType exactType;
             if (stamp.isExactType()) {
                 exactType = stamp.type();
-            } else if (stamp.type() != null && tool.assumptions() != null) {
+            } else if (stamp.type() != null && tool.assumptions().useOptimisticAssumptions()) {
                 exactType = stamp.type().findUniqueConcreteSubtype();
                 if (exactType != null) {
                     tool.assumptions().recordConcreteSubtype(stamp.type(), exactType);
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/RegisterFinalizerNode.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/RegisterFinalizerNode.java	Tue Nov 27 12:12:02 2012 +0100
@@ -62,7 +62,7 @@
             needsCheck = stamp.type().hasFinalizer();
         } else if (stamp.type() != null && !stamp.type().hasFinalizableSubclass()) {
             // if either the declared type of receiver or the holder can be assumed to have no finalizers
-            if (tool.assumptions() != null && tool.assumptions().recordNoFinalizableSubclassAssumption(stamp.type())) {
+            if (tool.assumptions().useOptimisticAssumptions() && tool.assumptions().recordNoFinalizableSubclassAssumption(stamp.type())) {
                 needsCheck = false;
             }
         }
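
RegisterFinalizerNode undergoes the same migration: the registration check is elided either because the exact type is known to have no finalizer, or optimistically when no finalizable subclass currently exists and that fact can be recorded as an assumption. A sketch under the same hypothetical types:

    final class FinalizerCheckSketch {
        interface Type {
            boolean hasFinalizer();
            boolean hasFinalizableSubclass();
        }
        interface Assumptions {
            boolean useOptimisticAssumptions();
            boolean recordNoFinalizableSubclassAssumption(Type t);
        }

        static boolean needsRegisterFinalizerCheck(Type type, boolean isExact, Assumptions assumptions) {
            boolean needsCheck = true;
            if (isExact) {
                needsCheck = type.hasFinalizer();
            } else if (type != null && !type.hasFinalizableSubclass()) {
                // Only valid if recorded: the assumption must invalidate the code
                // should a finalizable subclass appear later.
                if (assumptions.useOptimisticAssumptions() && assumptions.recordNoFinalizableSubclassAssumption(type)) {
                    needsCheck = false;
                }
            }
            return needsCheck;
        }
    }
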
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/TypeSwitchNode.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/TypeSwitchNode.java	Tue Nov 27 12:12:02 2012 +0100
@@ -41,8 +41,8 @@
     private final ResolvedJavaType[] keys;
 
     /**
-     * Constructs a type switch instruction. The keyProbabilities and keySuccessors array contain key.length + 1
-     * entries, the last entry describes the default (fall through) case.
+     * Constructs a type switch instruction. The keyProbabilities and keySuccessors arrays contain keys.length + 1
+     * entries. The last entry in each array describes the default case.
      *
      * @param value the instruction producing the value being switched on
      * @param successors the list of successors
@@ -52,8 +52,8 @@
      */
     public TypeSwitchNode(ValueNode value, BeginNode[] successors, double[] successorProbabilities, ResolvedJavaType[] keys, double[] keyProbabilities, int[] keySuccessors) {
         super(value, successors, successorProbabilities, keySuccessors, keyProbabilities);
-        assert successors.length == keys.length + 1;
-        assert successors.length == keyProbabilities.length;
+        assert successors.length <= keys.length + 1;
+        assert keySuccessors.length == keyProbabilities.length;
         this.keys = keys;
     }
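
The relaxed asserts reflect the successor sharing introduced in GraphBuilderPhase: several keys (and the default) may now map to the same successor, so only keySuccessors and keyProbabilities remain in lock-step while successors.length may be smaller than keys.length + 1. A hypothetical invariant check capturing the new contract:

    final class TypeSwitchInvariantsSketch {
        static boolean check(int successorCount, int keyCount, int[] keySuccessors, double[] keyProbabilities) {
            if (successorCount > keyCount + 1) {
                return false;       // at most one target per key plus the default
            }
            if (keySuccessors.length != keyProbabilities.length || keySuccessors.length != keyCount + 1) {
                return false;       // these two stay in lock-step
            }
            for (int s : keySuccessors) {
                if (s < 0 || s >= successorCount) {
                    return false;   // every key must map to a real successor
                }
            }
            return true;
        }
    }
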
 
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/ComputeProbabilityPhase.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/ComputeProbabilityPhase.java	Tue Nov 27 12:12:02 2012 +0100
@@ -100,6 +100,8 @@
                 double originalProbability = probability / frequency;
                 assert isRelativeProbability(originalProbability);
                 return (1 / frequency) * Math.max(1, Math.pow(originalProbability, 1.5) * Math.log10(frequency));
+            case -4:
+                return 1 / probability;
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -337,6 +339,7 @@
 
     private static FrequencyPropagationPolicy createFrequencyPropagationPolicy() {
         switch (GraalOptions.LoopFrequencyPropagationPolicy) {
+            case -4:
             case -3:
             case -2:
             case -1:
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningPhase.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningPhase.java	Tue Nov 27 12:12:02 2012 +0100
@@ -24,20 +24,19 @@
 
 import java.lang.reflect.*;
 import java.util.*;
-import java.util.concurrent.*;
 
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.debug.*;
-import com.oracle.graal.debug.internal.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.phases.*;
-import com.oracle.graal.phases.PhasePlan.*;
-import com.oracle.graal.phases.common.InliningUtil.*;
-import com.oracle.graal.phases.util.*;
-
+import com.oracle.graal.phases.PhasePlan.PhasePosition;
+import com.oracle.graal.phases.common.InliningUtil.InlineInfo;
+import com.oracle.graal.phases.common.InliningUtil.InliningCallback;
+import com.oracle.graal.phases.common.InliningUtil.InliningPolicy;
+import com.oracle.graal.phases.common.InliningUtil.WeightComputationPolicy;
 
 public class InliningPhase extends Phase implements InliningCallback {
     /*
@@ -47,139 +46,69 @@
      */
 
     private final TargetDescription target;
-    private final GraalCodeCacheProvider runtime;
-
-    private final Collection<? extends Invoke> hints;
+    private final PhasePlan plan;
 
-    private final PriorityQueue<InlineInfo> inlineCandidates = new PriorityQueue<>();
-    private Assumptions assumptions;
-
-    private final PhasePlan plan;
+    private final GraalCodeCacheProvider runtime;
+    private final Assumptions assumptions;
     private final GraphCache cache;
-    private final WeightComputationPolicy weightComputationPolicy;
     private final InliningPolicy inliningPolicy;
-    private final OptimisticOptimizations optimisticOpts;
 
     // Metrics
     private static final DebugMetric metricInliningPerformed = Debug.metric("InliningPerformed");
     private static final DebugMetric metricInliningConsidered = Debug.metric("InliningConsidered");
     private static final DebugMetric metricInliningStoppedByMaxDesiredSize = Debug.metric("InliningStoppedByMaxDesiredSize");
+    private static final DebugMetric metricInliningRuns = Debug.metric("Runs");
 
-    public InliningPhase(TargetDescription target, GraalCodeCacheProvider runtime, Collection<? extends Invoke> hints, Assumptions assumptions, GraphCache cache, PhasePlan plan, OptimisticOptimizations optimisticOpts) {
+    public InliningPhase(TargetDescription target, GraalCodeCacheProvider runtime, Collection<Invoke> hints, Assumptions assumptions, GraphCache cache, PhasePlan plan, OptimisticOptimizations optimisticOpts) {
+        this(target, runtime, assumptions, cache, plan, createInliningPolicy(runtime, assumptions, optimisticOpts, hints));
+    }
+
+    public InliningPhase(TargetDescription target, GraalCodeCacheProvider runtime, Assumptions assumptions, GraphCache cache, PhasePlan plan, InliningPolicy inliningPolicy) {
         this.target = target;
         this.runtime = runtime;
-        this.hints = hints;
         this.assumptions = assumptions;
         this.cache = cache;
         this.plan = plan;
-        this.optimisticOpts = optimisticOpts;
-        this.weightComputationPolicy = createWeightComputationPolicy();
-        this.inliningPolicy = createInliningPolicy();
+        this.inliningPolicy = inliningPolicy;
     }
 
-    @SuppressWarnings("unchecked")
     @Override
     protected void run(final StructuredGraph graph) {
-        graph.createNodeMap();
-
-        if (hints != null) {
-            scanInvokes((Iterable<? extends Node>) Util.uncheckedCast(this.hints));
-        } else {
-            scanInvokes(graph.getNodes(InvokeNode.class));
-            scanInvokes(graph.getNodes(InvokeWithExceptionNode.class));
-        }
+        inliningPolicy.initialize(graph);
 
-        while (!inlineCandidates.isEmpty() && graph.getNodeCount() < GraalOptions.MaximumDesiredSize) {
-            InlineInfo candidate = inlineCandidates.remove();
-            if (!candidate.invoke.node().isAlive()) {
-                continue;
-            }
-            // refresh infos
-            final InlineInfo info = InliningUtil.getInlineInfo(candidate.invoke, candidate.level, runtime, assumptions, this, optimisticOpts);
-
-            boolean inline = Debug.scope("InliningDecisions", new Callable<Boolean>() {
-                @Override
-                public Boolean call() throws Exception {
-                    return info != null && inliningPolicy.isWorthInlining(graph, info);
-                }
-            });
+        while (inliningPolicy.continueInlining(graph)) {
+            final InlineInfo candidate = inliningPolicy.next();
+            if (candidate != null) {
+                boolean isWorthInlining = inliningPolicy.isWorthInlining(candidate);
 
-            if (inline) {
-                int mark = graph.getMark();
-                Iterable<Node> newNodes = null;
-                try {
-                    info.inline(graph, runtime, this);
-                    Debug.dump(graph, "after %s", info);
-                    newNodes = graph.getNewNodes(mark);
-                    if (GraalOptions.OptCanonicalizer) {
-                        new CanonicalizerPhase(target, runtime, assumptions, mark, null).apply(graph);
+                metricInliningConsidered.increment();
+                if (isWorthInlining) {
+                    int mark = graph.getMark();
+                    try {
+                        candidate.inline(graph, runtime, this, assumptions);
+                        Debug.dump(graph, "after %s", candidate);
+                        Iterable<Node> newNodes = graph.getNewNodes(mark);
+                        if (GraalOptions.OptCanonicalizer) {
+                            new CanonicalizerPhase(target, runtime, assumptions, mark, null).apply(graph);
+                        }
+                        metricInliningPerformed.increment();
+
+                        inliningPolicy.scanInvokes(newNodes);
+                    } catch (BailoutException bailout) {
+                        // TODO determine if we should really bail out of the whole compilation.
+                        throw bailout;
+                    } catch (AssertionError e) {
+                        throw new GraalInternalError(e).addContext(candidate.toString());
+                    } catch (RuntimeException e) {
+                        throw new GraalInternalError(e).addContext(candidate.toString());
+                    } catch (GraalInternalError e) {
+                        throw e.addContext(candidate.toString());
                     }
-//                    if (GraalOptions.Intrinsify) {
-//                        new IntrinsificationPhase(runtime).apply(graph);
-//                    }
-                    metricInliningPerformed.increment();
-                } catch (BailoutException bailout) {
-                    // TODO determine if we should really bail out of the whole compilation.
-                    throw bailout;
-                } catch (AssertionError e) {
-                    throw new GraalInternalError(e).addContext(info.toString());
-                } catch (RuntimeException e) {
-                    throw new GraalInternalError(e).addContext(info.toString());
-                } catch (GraalInternalError e) {
-                    throw e.addContext(info.toString());
-                }
-
-                if (newNodes != null && info.level < GraalOptions.MaximumInlineLevel) {
-                    scanInvokes(newNodes);
                 }
             }
         }
-
-        if (GraalOptions.Debug && graph.getNodeCount() >= GraalOptions.MaximumDesiredSize) {
-            Debug.scope("InliningDecisions", new Runnable() {
-                public void run() {
-                    for (InlineInfo info : inlineCandidates) {
-                        Debug.log("not inlining %s because inlining cut off by MaximumDesiredSize", InliningUtil.methodName(info));
-                    }
-                }
-            });
-
-            metricInliningStoppedByMaxDesiredSize.increment();
-        }
     }
 
-    private void scanInvokes(final Iterable<? extends Node> nodes) {
-        Debug.scope("InliningDecisions", new Runnable() {
-            public void run() {
-                for (Node node : nodes) {
-                    if (node != null) {
-                        if (node instanceof Invoke) {
-                            Invoke invoke = (Invoke) node;
-                            scanInvoke(invoke);
-                        }
-                        for (Node usage : node.usages().filterInterface(Invoke.class).snapshot()) {
-                            scanInvoke((Invoke) usage);
-                        }
-                    }
-                }
-            }
-        });
-    }
-
-    private void scanInvoke(Invoke invoke) {
-        InlineInfo info = InliningUtil.getInlineInfo(invoke, computeInliningLevel(invoke), runtime, assumptions, this, optimisticOpts);
-        if (info != null) {
-            metricInliningConsidered.increment();
-            inlineCandidates.add(info);
-        }
-    }
-
-    public static final Map<JavaMethod, Integer> parsedMethods = new HashMap<>();
-
-
-
-    private static final DebugMetric metricInliningRuns = Debug.metric("Runs");
-
     @Override
     public StructuredGraph buildGraph(final ResolvedJavaMethod method) {
         metricInliningRuns.increment();
@@ -202,9 +131,6 @@
         if (GraalOptions.OptCanonicalizer) {
             new CanonicalizerPhase(target, runtime, assumptions).apply(newGraph);
         }
-        if (GraalOptions.Intrinsify) {
-            new IntrinsificationPhase(runtime).apply(newGraph);
-        }
         if (GraalOptions.CullFrameStates) {
             new CullFrameStatesPhase().apply(newGraph);
         }
@@ -214,133 +140,63 @@
         return newGraph;
     }
 
-    @Override
-    public double inliningWeight(ResolvedJavaMethod caller, ResolvedJavaMethod method, Invoke invoke) {
-        boolean preferred = hints != null && hints.contains(invoke);
-        return weightComputationPolicy.computeWeight(caller, method, invoke, preferred);
-    }
-
-    public static int graphComplexity(StructuredGraph graph) {
-        int result = 0;
-        for (Node node : graph.getNodes()) {
-            if (node instanceof ConstantNode || node instanceof LocalNode || node instanceof BeginNode || node instanceof ReturnNode || node instanceof UnwindNode) {
-                result += 0;
-            } else if (node instanceof PhiNode) {
-                result += 5;
-            } else if (node instanceof MergeNode || node instanceof Invoke || node instanceof LoopEndNode || node instanceof EndNode) {
-                result += 0;
-            } else if (node instanceof ControlSplitNode) {
-                result += ((ControlSplitNode) node).blockSuccessorCount();
-            } else {
-                result += 1;
-            }
-        }
-        return Math.max(1, result);
-    }
-
-
-    @Override
-    public void recordConcreteMethodAssumption(ResolvedJavaMethod method, ResolvedJavaType context, ResolvedJavaMethod impl) {
-        assumptions.recordConcreteMethod(method, context, impl);
-    }
-
-    @Override
-    public void recordMethodContentsAssumption(ResolvedJavaMethod method) {
-        if (assumptions != null) {
-            assumptions.recordMethodContents(method);
-        }
+    private interface InliningDecision {
+        boolean isWorthInlining(InlineInfo info);
     }
 
-    private static int computeInliningLevel(Invoke invoke) {
-        int count = -1;
-        FrameState curState = invoke.stateAfter();
-        while (curState != null) {
-            count++;
-            curState = curState.outerFrameState();
-        }
-        return count;
-    }
-
-    private static InliningPolicy createInliningPolicy() {
-        switch(GraalOptions.InliningPolicy) {
-            case 0: return new WeightBasedInliningPolicy();
-            case 1: return new C1StaticSizeBasedInliningPolicy();
-            case 2: return new MinimumCodeSizeBasedInliningPolicy();
-            case 3: return new DynamicSizeBasedInliningPolicy();
-            case 4: return new GreedySizeBasedInliningPolicy();
-            default:
-                GraalInternalError.shouldNotReachHere();
-                return null;
+    private abstract static class AbstractInliningDecision implements InliningDecision {
+        public static boolean decideSizeBasedInlining(InlineInfo info, double maxSize) {
+            boolean success = info.weight() <= maxSize;
+            if (GraalOptions.Debug) {
+                String formatterString = success ? "(size %f <= %f)" : "(too large %f > %f)";
+                InliningUtil.logInliningDecision(info, success, formatterString, info.weight(), maxSize);
+            }
+            return success;
         }
-    }
 
-    private static WeightComputationPolicy createWeightComputationPolicy() {
-        switch(GraalOptions.WeightComputationPolicy) {
-            case 0: throw new GraalInternalError("removed because of invokation counter changes");
-            case 1: return new BytecodeSizeBasedWeightComputationPolicy();
-            case 2: return new ComplexityBasedWeightComputationPolicy();
-            default:
-                GraalInternalError.shouldNotReachHere();
-                return null;
-        }
-    }
-
-    private interface InliningPolicy {
-        boolean isWorthInlining(StructuredGraph callerGraph, InlineInfo info);
-    }
-
-    private static class WeightBasedInliningPolicy implements InliningPolicy {
-        @Override
-        public boolean isWorthInlining(StructuredGraph callerGraph, InlineInfo info) {
-            if (!checkCompiledCodeSize(info)) {
+        public static boolean checkCompiledCodeSize(InlineInfo info) {
+            if (GraalOptions.SmallCompiledCodeSize >= 0 && info.compiledCodeSize() > GraalOptions.SmallCompiledCodeSize) {
+                InliningUtil.logNotInlinedMethod(info, "(CompiledCodeSize %d > %d)", info.compiledCodeSize(), GraalOptions.SmallCompiledCodeSize);
                 return false;
             }
-
-            double penalty = Math.pow(GraalOptions.InliningSizePenaltyExp, callerGraph.getNodeCount() / (double) GraalOptions.MaximumDesiredSize) / GraalOptions.InliningSizePenaltyExp;
-            if (info.weight > GraalOptions.MaximumInlineWeight / (1 + penalty * GraalOptions.InliningSizePenalty)) {
-                Debug.log("not inlining %s (cut off by weight %e)", InliningUtil.methodName(info), info.weight);
-                return false;
-            }
-
-            Debug.log("inlining %s (weight %f): %s", InliningUtil.methodName(info), info.weight);
             return true;
         }
     }
 
-    private static class C1StaticSizeBasedInliningPolicy implements InliningPolicy {
+    private static class C1StaticSizeBasedInliningDecision extends AbstractInliningDecision {
         @Override
-        public boolean isWorthInlining(StructuredGraph callerGraph, InlineInfo info) {
-            double maxSize = Math.max(GraalOptions.MaximumTrivialSize, Math.pow(GraalOptions.NestedInliningSizeRatio, info.level) * GraalOptions.MaximumInlineSize);
+        public boolean isWorthInlining(InlineInfo info) {
+            double maxSize = Math.max(GraalOptions.MaximumTrivialSize, Math.pow(GraalOptions.NestedInliningSizeRatio, info.level()) * GraalOptions.MaximumInlineSize);
             return decideSizeBasedInlining(info, maxSize);
         }
     }
 
-    private static class MinimumCodeSizeBasedInliningPolicy implements InliningPolicy {
+    private static class MinimumCodeSizeBasedInliningDecision extends AbstractInliningDecision {
         @Override
-        public boolean isWorthInlining(StructuredGraph callerGraph, InlineInfo info) {
+        public boolean isWorthInlining(InlineInfo info) {
             assert GraalOptions.ProbabilityAnalysis;
             if (!checkCompiledCodeSize(info)) {
                 return false;
             }
 
-            double inlineWeight = Math.min(GraalOptions.ProbabilityCapForInlining, info.invoke.probability());
-            double maxSize = Math.pow(GraalOptions.NestedInliningSizeRatio, info.level) * GraalOptions.MaximumInlineSize * inlineWeight;
+            double inlineWeight = Math.min(GraalOptions.ProbabilityCapForInlining, info.invoke().probability());
+            double maxSize = Math.pow(GraalOptions.NestedInliningSizeRatio, info.level()) * GraalOptions.MaximumInlineSize * inlineWeight;
             maxSize = Math.max(GraalOptions.MaximumTrivialSize, maxSize);
 
             return decideSizeBasedInlining(info, maxSize);
         }
     }
 
-    private static class DynamicSizeBasedInliningPolicy implements InliningPolicy {
+    private static class DynamicSizeBasedInliningDecision extends AbstractInliningDecision {
         @Override
-        public boolean isWorthInlining(StructuredGraph callerGraph, InlineInfo info) {
+        public boolean isWorthInlining(InlineInfo info) {
             assert GraalOptions.ProbabilityAnalysis;
             if (!checkCompiledCodeSize(info)) {
                 return false;
             }
 
-            double inlineBoost = Math.min(GraalOptions.ProbabilityCapForInlining, info.invoke.probability()) + Math.log10(Math.max(1, info.invoke.probability() - GraalOptions.ProbabilityCapForInlining + 1));
-            double maxSize = Math.pow(GraalOptions.NestedInliningSizeRatio, info.level) * GraalOptions.MaximumInlineSize;
+            double inlineBoost = Math.min(GraalOptions.ProbabilityCapForInlining, info.invoke().probability()) + Math.log10(Math.max(1, info.invoke().probability() - GraalOptions.ProbabilityCapForInlining + 1));
+            double maxSize = Math.pow(GraalOptions.NestedInliningSizeRatio, info.level()) * GraalOptions.MaximumInlineSize;
             maxSize = maxSize + maxSize * inlineBoost;
             maxSize = Math.min(GraalOptions.MaximumGreedyInlineSize, Math.max(GraalOptions.MaximumTrivialSize, maxSize));
 
@@ -348,9 +204,9 @@
         }
     }
 
-    private static class GreedySizeBasedInliningPolicy implements InliningPolicy {
+    private static class GreedySizeBasedInliningDecision extends AbstractInliningDecision {
         @Override
-        public boolean isWorthInlining(StructuredGraph callerGraph, InlineInfo info) {
+        public boolean isWorthInlining(InlineInfo info) {
             assert GraalOptions.ProbabilityAnalysis;
             if (!checkCompiledCodeSize(info)) {
                 return false;
@@ -358,42 +214,34 @@
 
             double maxSize = GraalOptions.MaximumGreedyInlineSize;
             if (GraalOptions.InliningBonusPerTransferredValue != 0) {
-                Signature signature = info.invoke.methodCallTarget().targetMethod().getSignature();
-                int transferredValues = signature.getParameterCount(!Modifier.isStatic(info.invoke.methodCallTarget().targetMethod().getModifiers()));
+                Signature signature = info.invoke().methodCallTarget().targetMethod().getSignature();
+                int transferredValues = signature.getParameterCount(!Modifier.isStatic(info.invoke().methodCallTarget().targetMethod().getModifiers()));
                 if (signature.getReturnKind() != Kind.Void) {
                     transferredValues++;
                 }
                 maxSize += transferredValues * GraalOptions.InliningBonusPerTransferredValue;
             }
 
-            double inlineRatio = Math.min(GraalOptions.ProbabilityCapForInlining, info.invoke.probability());
-            maxSize = Math.pow(GraalOptions.NestedInliningSizeRatio, info.level) * maxSize * inlineRatio;
+            double inlineRatio = Math.min(GraalOptions.ProbabilityCapForInlining, info.invoke().probability());
+            maxSize = Math.pow(GraalOptions.NestedInliningSizeRatio, info.level()) * maxSize * inlineRatio;
             maxSize = Math.max(maxSize, GraalOptions.MaximumTrivialSize);
 
             return decideSizeBasedInlining(info, maxSize);
         }
     }
 
-    private static boolean decideSizeBasedInlining(InlineInfo info, double maxSize) {
-        boolean success = info.weight <= maxSize;
-        if (DebugScope.getInstance().isLogEnabled()) {
-            String formatterString = success ? "inlining %s (size %f <= %f)" : "not inlining %s (too large %f > %f)";
-            Debug.log(formatterString, InliningUtil.methodName(info), info.weight, maxSize);
-        }
-        return success;
-    }
+    private static class GreedyMachineCodeInliningDecision extends AbstractInliningDecision {
+        @Override
+        public boolean isWorthInlining(InlineInfo info) {
+            assert GraalOptions.ProbabilityAnalysis;
 
-    private static boolean checkCompiledCodeSize(InlineInfo info) {
-        if (GraalOptions.SmallCompiledCodeSize >= 0 && info.compiledCodeSize() > GraalOptions.SmallCompiledCodeSize) {
-            Debug.log("not inlining %s (CompiledCodeSize %d > %d)", InliningUtil.methodName(info), info.compiledCodeSize(), GraalOptions.SmallCompiledCodeSize);
-            return false;
+            double maxSize = GraalOptions.MaximumGreedyInlineSize;
+            double inlineRatio = Math.min(GraalOptions.ProbabilityCapForInlining, info.invoke().probability());
+            maxSize = Math.pow(GraalOptions.NestedInliningSizeRatio, info.level()) * maxSize * inlineRatio;
+            maxSize = Math.max(maxSize, GraalOptions.MaximumTrivialSize);
+
+            return decideSizeBasedInlining(info, maxSize);
         }
-        return true;
-    }
-
-
-    private interface WeightComputationPolicy {
-        double computeWeight(ResolvedJavaMethod caller, ResolvedJavaMethod method, Invoke invoke, boolean preferredInvoke);
     }
 
     private static class BytecodeSizeBasedWeightComputationPolicy implements WeightComputationPolicy {
@@ -417,4 +265,294 @@
             return complexity;
         }
     }
+
+    private static class CompiledCodeSizeWeightComputationPolicy implements WeightComputationPolicy {
+        @Override
+        public double computeWeight(ResolvedJavaMethod caller, ResolvedJavaMethod method, Invoke invoke, boolean preferredInvoke) {
+            int compiledCodeSize = method.getCompiledCodeSize();
+            return compiledCodeSize > 0 ? compiledCodeSize : method.getCodeSize() * 10;
+        }
+    }
+
+    private static class CFInliningPolicy implements InliningPolicy {
+        private final InliningDecision inliningDecision;
+        private final WeightComputationPolicy weightComputationPolicy;
+        private final Collection<Invoke> hints;
+        private final GraalCodeCacheProvider runtime;
+        private final Assumptions assumptions;
+        private final OptimisticOptimizations optimisticOpts;
+        private final Deque<Invoke> sortedInvokes;
+        private NodeBitMap visitedFixedNodes;
+        private FixedNode invokePredecessor;
+
+        public CFInliningPolicy(InliningDecision inliningPolicy, WeightComputationPolicy weightComputationPolicy, Collection<Invoke> hints,
+                        GraalCodeCacheProvider runtime, Assumptions assumptions, OptimisticOptimizations optimisticOpts) {
+            this.inliningDecision = inliningPolicy;
+            this.weightComputationPolicy = weightComputationPolicy;
+            this.hints = hints;
+            this.runtime = runtime;
+            this.assumptions = assumptions;
+            this.optimisticOpts = optimisticOpts;
+            this.sortedInvokes = new ArrayDeque<>();
+        }
+
+        public boolean continueInlining(StructuredGraph graph) {
+            if (graph.getNodeCount() >= GraalOptions.MaximumDesiredSize) {
+                InliningUtil.logInliningDecision("inlining is cut off by MaximumDesiredSize");
+                metricInliningStoppedByMaxDesiredSize.increment();
+                return false;
+            }
+
+            return !sortedInvokes.isEmpty();
+        }
+
+        public InlineInfo next() {
+            Invoke invoke = sortedInvokes.pop();
+            InlineInfo info = InliningUtil.getInlineInfo(invoke, runtime, assumptions, this, optimisticOpts);
+            if (info != null) {
+                invokePredecessor = (FixedNode) info.invoke().predecessor();
+            }
+            return info;
+        }
+
+        public boolean isWorthInlining(InlineInfo info) {
+            return inliningDecision.isWorthInlining(info);
+        }
+
+        public void initialize(StructuredGraph graph) {
+            visitedFixedNodes = graph.createNodeBitMap(true);
+            scanGraphForInvokes(graph.start());
+            if (hints != null) {
+                sortedInvokes.retainAll(hints);
+            }
+        }
+
+        public void scanInvokes(Iterable<? extends Node> newNodes) {
+            scanGraphForInvokes(invokePredecessor);
+        }
+
+        private void scanGraphForInvokes(FixedNode start) {
+            ArrayList<Invoke> invokes = new InliningIterator(start, visitedFixedNodes).apply();
+
+            // insert the newly found invokes in their correct control-flow order
+            for (int i = invokes.size() - 1; i >= 0; i--) {
+                sortedInvokes.addFirst(invokes.get(i));
+            }
+        }
+
+        public double inliningWeight(ResolvedJavaMethod caller, ResolvedJavaMethod method, Invoke invoke) {
+            boolean preferredInvoke = hints != null && hints.contains(invoke);
+            return weightComputationPolicy.computeWeight(caller, method, invoke, preferredInvoke);
+        }
+    }
+
+    private static class PriorityInliningPolicy implements InliningPolicy {
+        private final InliningDecision inliningDecision;
+        private final WeightComputationPolicy weightComputationPolicy;
+        private final Collection<Invoke> hints;
+        private final GraalCodeCacheProvider runtime;
+        private final Assumptions assumptions;
+        private final OptimisticOptimizations optimisticOpts;
+        private final PriorityQueue<InlineInfo> sortedCandidates;
+
+        public PriorityInliningPolicy(InliningDecision inliningPolicy, WeightComputationPolicy weightComputationPolicy, Collection<Invoke> hints,
+                        GraalCodeCacheProvider runtime, Assumptions assumptions, OptimisticOptimizations optimisticOpts) {
+            this.inliningDecision = inliningPolicy;
+            this.weightComputationPolicy = weightComputationPolicy;
+            this.hints = hints;
+            this.runtime = runtime;
+            this.assumptions = assumptions;
+            this.optimisticOpts = optimisticOpts;
+            sortedCandidates = new PriorityQueue<>();
+        }
+
+        public boolean continueInlining(StructuredGraph graph) {
+            if (graph.getNodeCount() >= GraalOptions.MaximumDesiredSize) {
+                InliningUtil.logInliningDecision("inlining is cut off by MaximumDesiredSize");
+                metricInliningStoppedByMaxDesiredSize.increment();
+                return false;
+            }
+
+            return !sortedCandidates.isEmpty();
+        }
+
+        public InlineInfo next() {
+            // refresh cached info before using it (it might have been in the queue for a long time)
+            InlineInfo info = sortedCandidates.remove();
+            return InliningUtil.getInlineInfo(info.invoke(), runtime, assumptions, this, optimisticOpts);
+        }
+
+        @Override
+        public boolean isWorthInlining(InlineInfo info) {
+            return inliningDecision.isWorthInlining(info);
+        }
+
+        @SuppressWarnings("unchecked")
+        public void initialize(StructuredGraph graph) {
+            if (hints == null) {
+                scanInvokes(graph.getNodes(InvokeNode.class));
+                scanInvokes(graph.getNodes(InvokeWithExceptionNode.class));
+            } else {
+                scanInvokes((Iterable<? extends Node>) (Iterable<?>) hints);
+            }
+        }
+
+        public void scanInvokes(Iterable<? extends Node> nodes) {
+            for (Node node : nodes) {
+                if (node != null) {
+                    if (node instanceof Invoke) {
+                        Invoke invoke = (Invoke) node;
+                        scanInvoke(invoke);
+                    }
+                    for (Node usage : node.usages().filterInterface(Invoke.class).snapshot()) {
+                        scanInvoke((Invoke) usage);
+                    }
+                }
+            }
+        }
+
+        private void scanInvoke(Invoke invoke) {
+            InlineInfo info = InliningUtil.getInlineInfo(invoke, runtime, assumptions, this, optimisticOpts);
+            if (info != null) {
+                sortedCandidates.add(info);
+            }
+        }
+
+        @Override
+        public double inliningWeight(ResolvedJavaMethod caller, ResolvedJavaMethod method, Invoke invoke) {
+            boolean preferredInvoke = hints != null && hints.contains(invoke);
+            return weightComputationPolicy.computeWeight(caller, method, invoke, preferredInvoke);
+        }
+    }
+
+    private static class InliningIterator {
+        private final FixedNode start;
+        private final NodeBitMap processedNodes;
+
+        private final Deque<FixedNode> nodeQueue;
+        private final NodeBitMap queuedNodes;
+
+        public InliningIterator(FixedNode start, NodeBitMap visitedFixedNodes) {
+            this.start = start;
+            this.processedNodes = visitedFixedNodes;
+
+            this.nodeQueue = new ArrayDeque<>();
+            this.queuedNodes = visitedFixedNodes.copy();
+
+            assert start.isAlive();
+        }
+
+        public ArrayList<Invoke> apply() {
+            ArrayList<Invoke> invokes = new ArrayList<>();
+            FixedNode current = start;
+            do {
+                assert current.isAlive();
+                processedNodes.mark(current);
+
+                if (current instanceof InvokeWithExceptionNode || current instanceof InvokeNode) {
+                    invokes.add((Invoke) current);
+                    queueSuccessors(current);
+                    current = nextQueuedNode();
+                } else if (current instanceof LoopBeginNode) {
+                    current = ((LoopBeginNode) current).next();
+                    assert current != null;
+                } else if (current instanceof LoopEndNode) {
+                    current = nextQueuedNode();
+                } else if (current instanceof MergeNode) {
+                    current = ((MergeNode) current).next();
+                    assert current != null;
+                } else if (current instanceof FixedWithNextNode) {
+                    queueSuccessors(current);
+                    current = nextQueuedNode();
+                } else if (current instanceof EndNode) {
+                    queueMerge((EndNode) current);
+                    current = nextQueuedNode();
+                } else if (current instanceof DeoptimizeNode) {
+                    current = nextQueuedNode();
+                } else if (current instanceof ReturnNode) {
+                    current = nextQueuedNode();
+                } else if (current instanceof UnwindNode) {
+                    current = nextQueuedNode();
+                } else if (current instanceof ControlSplitNode) {
+                    queueSuccessors(current);
+                    current = nextQueuedNode();
+                } else {
+                    assert false : current;
+                }
+            } while (current != null);
+
+            return invokes;
+        }
+
+        private void queueSuccessors(FixedNode x) {
+            for (Node node : x.successors()) {
+                if (node != null && !queuedNodes.isMarked(node)) {
+                    queuedNodes.mark(node);
+                    nodeQueue.addFirst((FixedNode) node);
+                }
+            }
+        }
+
+        private FixedNode nextQueuedNode() {
+            if (nodeQueue.isEmpty()) {
+                return null;
+            }
+
+            FixedNode result = nodeQueue.removeFirst();
+            assert queuedNodes.isMarked(result);
+            return result;
+        }
+
+        private void queueMerge(EndNode end) {
+            MergeNode merge = end.merge();
+            if (!queuedNodes.isMarked(merge) && visitedAllEnds(merge)) {
+                queuedNodes.mark(merge);
+                nodeQueue.add(merge);
+            }
+        }
+
+        private boolean visitedAllEnds(MergeNode merge) {
+            for (int i = 0; i < merge.forwardEndCount(); i++) {
+                if (!processedNodes.isMarked(merge.forwardEndAt(i))) {
+                    return false;
+                }
+            }
+            return true;
+        }
+    }
+
+    private static InliningPolicy createInliningPolicy(GraalCodeCacheProvider runtime, Assumptions assumptions, OptimisticOptimizations optimisticOpts, Collection<Invoke> hints) {
+        switch (GraalOptions.InliningPolicy) {
+            case 0: return new CFInliningPolicy(createInliningDecision(), createWeightComputationPolicy(), hints, runtime, assumptions, optimisticOpts);
+            case 1: return new PriorityInliningPolicy(createInliningDecision(), createWeightComputationPolicy(), hints, runtime, assumptions, optimisticOpts);
+            default:
+                GraalInternalError.shouldNotReachHere();
+                return null;
+        }
+    }
+
+    private static InliningDecision createInliningDecision() {
+        switch (GraalOptions.InliningDecision) {
+            case 1: return new C1StaticSizeBasedInliningDecision();
+            case 2: return new MinimumCodeSizeBasedInliningDecision();
+            case 3: return new DynamicSizeBasedInliningDecision();
+            case 4: return new GreedySizeBasedInliningDecision();
+            case 5: return new GreedyMachineCodeInliningDecision();
+            default:
+                GraalInternalError.shouldNotReachHere();
+                return null;
+        }
+    }
+
+    private static WeightComputationPolicy createWeightComputationPolicy() {
+        switch (GraalOptions.WeightComputationPolicy) {
+            case 0: throw new GraalInternalError("removed because of invocation counter changes");
+            case 1: return new BytecodeSizeBasedWeightComputationPolicy();
+            case 2: return new ComplexityBasedWeightComputationPolicy();
+            case 3: return new CompiledCodeSizeWeightComputationPolicy();
+            default:
+                GraalInternalError.shouldNotReachHere();
+                return null;
+        }
+    }
 }
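
The net effect of the InliningPhase rewrite: the phase no longer owns the candidate queue, weights, or hints; it drives an InliningPolicy that encapsulates both the iteration order (CFInliningPolicy walks invokes in control-flow order, PriorityInliningPolicy keeps a weight-ordered queue) and the worthiness check. A reduced, hypothetical model of that driver loop, with InlineInfo shrunk to a string:

    import java.util.*;

    final class InliningDriverSketch {
        interface Policy {
            boolean continueInlining(int graphNodeCount);
            String next();                          // stands in for InlineInfo; may be null
            boolean isWorthInlining(String candidate);
            void scanInvokes(List<String> newInvokes);
        }

        static List<String> run(Policy policy, int graphNodeCount) {
            List<String> inlined = new ArrayList<>();
            while (policy.continueInlining(graphNodeCount)) {
                String candidate = policy.next();
                if (candidate != null && policy.isWorthInlining(candidate)) {
                    inlined.add(candidate);                       // candidate.inline(...) in the real phase
                    policy.scanInvokes(List.of(candidate + "'")); // invokes exposed by the inlined body
                }
            }
            return inlined;
        }
    }

Separating traversal (InliningPolicy) from judgment (InliningDecision) is what lets GraalOptions.InliningPolicy and GraalOptions.InliningDecision vary independently in the factory methods above.
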
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningUtil.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningUtil.java	Tue Nov 27 12:12:02 2012 +0100
@@ -28,8 +28,8 @@
 
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
+import com.oracle.graal.api.meta.JavaTypeProfile.ProfiledType;
 import com.oracle.graal.api.meta.ResolvedJavaType.Representation;
-import com.oracle.graal.api.meta.JavaTypeProfile.ProfiledType;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
@@ -43,31 +43,97 @@
 import com.oracle.graal.phases.*;
 
 public class InliningUtil {
-
     private static final DebugMetric metricInliningTailDuplication = Debug.metric("InliningTailDuplication");
+    private static final String inliningDecisionsScopeString = "InliningDecisions";
 
     public interface InliningCallback {
-        StructuredGraph buildGraph(ResolvedJavaMethod method);
+        StructuredGraph buildGraph(final ResolvedJavaMethod method);
+    }
+
+    public interface InliningPolicy {
+        void initialize(StructuredGraph graph);
+        boolean continueInlining(StructuredGraph graph);
+        InlineInfo next();
+        void scanInvokes(Iterable<? extends Node> newNodes);
         double inliningWeight(ResolvedJavaMethod caller, ResolvedJavaMethod method, Invoke invoke);
-        void recordMethodContentsAssumption(ResolvedJavaMethod method);
-        void recordConcreteMethodAssumption(ResolvedJavaMethod method, ResolvedJavaType context, ResolvedJavaMethod impl);
+        boolean isWorthInlining(InlineInfo info);
+    }
+
+    public interface WeightComputationPolicy {
+        double computeWeight(ResolvedJavaMethod caller, ResolvedJavaMethod method, Invoke invoke, boolean preferredInvoke);
+    }
+
+    public static void logNotInlinedMethod(InlineInfo info, String msg, Object... args) {
+        logInliningDecision(info, false, msg, args);
+    }
+
+    public static void logInliningDecision(InlineInfo info, boolean success, String msg, final Object... args) {
+        if (shouldLogInliningDecision()) {
+            logInliningDecision(methodName(info), success, msg, args);
+        }
+    }
+
+    public static void logInliningDecision(final String msg, final Object... args) {
+        Debug.scope(inliningDecisionsScopeString, new Runnable() {
+            public void run() {
+                Debug.log(msg, args);
+            }
+        });
     }
 
-    public static String methodName(ResolvedJavaMethod method, Invoke invoke) {
-        if (!Debug.isLogEnabled()) {
-            return null;
-        } else if (invoke != null && invoke.stateAfter() != null) {
+    private static boolean logNotInlinedMethodAndReturnFalse(Invoke invoke, String msg) {
+        if (shouldLogInliningDecision()) {
+            String methodString = invoke.callTarget() == null ? "callTarget=null" : invoke.callTarget().targetName();
+            logInliningDecision(methodString, false, msg, new Object[0]);
+        }
+        return false;
+    }
+
+    private static InlineInfo logNotInlinedMethodAndReturnNull(Invoke invoke, ResolvedJavaMethod method, String msg) {
+        if (shouldLogInliningDecision()) {
+            String methodString = methodName(method, invoke);
+            logInliningDecision(methodString, false, msg, new Object[0]);
+        }
+        return null;
+    }
+
+    private static boolean logNotInlinedMethodAndReturnFalse(Invoke invoke, ResolvedJavaMethod method, String msg) {
+        if (shouldLogInliningDecision()) {
+            String methodString = methodName(method, invoke);
+            logInliningDecision(methodString, false, msg, new Object[0]);
+        }
+        return false;
+    }
+
+    private static void logInliningDecision(final String methodString, final boolean success, final String msg, final Object... args) {
+        String inliningMsg = "inlining " + methodString + ": " + msg;
+        if (!success) {
+            inliningMsg = "not " + inliningMsg;
+        }
+        logInliningDecision(inliningMsg, args);
+    }
+
+    private static boolean shouldLogInliningDecision() {
+        return Debug.scope(inliningDecisionsScopeString, new Callable<Boolean>() {
+            public Boolean call() {
+                return Debug.isLogEnabled();
+            }
+        });
+    }
+
+    private static String methodName(ResolvedJavaMethod method, Invoke invoke) {
+        if (invoke != null && invoke.stateAfter() != null) {
             return methodName(invoke.stateAfter(), invoke.bci()) + ": " + MetaUtil.format("%H.%n(%p):%r", method) + " (" + method.getCodeSize() + " bytes)";
         } else {
             return MetaUtil.format("%H.%n(%p):%r", method) + " (" + method.getCodeSize() + " bytes)";
         }
     }
 
-    public static String methodName(InlineInfo info) {
-        if (!Debug.isLogEnabled()) {
-            return null;
-        } else if (info.invoke != null && info.invoke.stateAfter() != null) {
-            return methodName(info.invoke.stateAfter(), info.invoke.bci()) + ": " + info.toString();
+    private static String methodName(InlineInfo info) {
+        if (info == null) {
+            return "null";
+        } else if (info.invoke() != null && info.invoke().stateAfter() != null) {
+            return methodName(info.invoke().stateAfter(), info.invoke().bci()) + ": " + info.toString();
         } else {
             return info.toString();
         }
@@ -89,63 +155,77 @@
      * The weight is the amortized weight of the additional code - so smaller is better.
      * The level is the number of nested inlinings that lead to this invoke.
      */
-    public abstract static class InlineInfo implements Comparable<InlineInfo> {
-        public final Invoke invoke;
-        public final double weight;
-        public final int level;
-
-        public InlineInfo(Invoke invoke, double weight, int level) {
-            this.invoke = invoke;
-            this.weight = weight;
-            this.level = level;
-        }
-
-        public abstract int compiledCodeSize();
-
-        @Override
-        public int compareTo(InlineInfo o) {
-            return (weight < o.weight) ? -1 : (weight > o.weight) ? 1 : 0;
-        }
-
-        protected static StructuredGraph getGraph(final ResolvedJavaMethod concrete, final InliningCallback callback) {
-            return Debug.scope("GetInliningGraph", concrete, new Callable<StructuredGraph>() {
-                @Override
-                public StructuredGraph call() throws Exception {
-                    return callback.buildGraph(concrete);
-                }
-            });
-        }
-
-        public abstract boolean canDeopt();
+    public interface InlineInfo extends Comparable<InlineInfo> {
+        Invoke invoke();
+        double weight();
+        int level();
+        int compiledCodeSize();
+        int compareTo(InlineInfo o);
 
         /**
          * Performs the inlining described by this object, i.e., replaces the invoke with the body of the
          * inlined method and records the assumptions on which the inlined code depends.
-         *
-         * @param graph
-         * @param runtime
-         * @param callback
          */
-        public abstract void inline(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback);
+        void inline(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback, Assumptions assumptions);
+    }
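+
+    // Hedged usage sketch (editor's illustration; not asserted by this changeset): a caller
+    // typically obtains an InlineInfo from getInlineInfo below, consults the inlining policy,
+    // and then performs the inlining, which now records its own assumptions:
+    //   InlineInfo info = InliningUtil.getInlineInfo(invoke, runtime, assumptions, inliningPolicy, optimisticOpts);
+    //   if (info != null && inliningPolicy.isWorthInlining(info)) {
+    //       info.inline(graph, runtime, callback, assumptions);
+    //   }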
+
+    public abstract static class AbstractInlineInfo implements InlineInfo {
+        protected final Invoke invoke;
+        protected final double weight;
+
+        public AbstractInlineInfo(Invoke invoke, double weight) {
+            this.invoke = invoke;
+            this.weight = weight;
+        }
+
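+        // Candidates are ordered by amortized weight: smaller weights compare first,
+        // so cheaper inlining opportunities are considered before costlier ones.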
+        @Override
+        public int compareTo(InlineInfo o) {
+            return (weight < o.weight()) ? -1 : (weight > o.weight()) ? 1 : 0;
+        }
+
+        public Invoke invoke() {
+            return invoke;
+        }
+
+        public double weight() {
+            return weight;
+        }
+
+        public int level() {
+            return computeInliningLevel(invoke);
+        }
+
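+        // Prefers an intrinsic graph when one is registered for the target method; otherwise
+        // the callback parses the callee. Native methods are only acceptable as intrinsics.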
+        protected static StructuredGraph getGraph(final Invoke invoke, final ResolvedJavaMethod concrete, final GraalCodeCacheProvider runtime, final InliningCallback callback) {
+            return Debug.scope("GetInliningGraph", concrete, new Callable<StructuredGraph>() {
+                @Override
+                public StructuredGraph call() throws Exception {
+                    StructuredGraph result = getIntrinsicGraph(invoke, concrete, runtime);
+                    if (result == null) {
+                        assert !Modifier.isNative(concrete.getModifiers());
+                        result = callback.buildGraph(concrete);
+                    }
+                    return result;
+                }
+            });
+        }
     }
 
     /**
      * Represents an inlining opportunity where the compiler can statically determine a monomorphic target method,
      * so the called method is known exactly.
      */
-    private static class ExactInlineInfo extends InlineInfo {
+    private static class ExactInlineInfo extends AbstractInlineInfo {
         public final ResolvedJavaMethod concrete;
 
-        public ExactInlineInfo(Invoke invoke, double weight, int level, ResolvedJavaMethod concrete) {
-            super(invoke, weight, level);
+        public ExactInlineInfo(Invoke invoke, double weight, ResolvedJavaMethod concrete) {
+            super(invoke, weight);
             this.concrete = concrete;
         }
 
         @Override
-        public void inline(StructuredGraph compilerGraph, GraalCodeCacheProvider runtime, final InliningCallback callback) {
-            StructuredGraph graph = getGraph(concrete, callback);
-            assert !IntrinsificationPhase.canIntrinsify(invoke, concrete, runtime);
-            callback.recordMethodContentsAssumption(concrete);
+        public void inline(StructuredGraph compilerGraph, GraalCodeCacheProvider runtime, InliningCallback callback, Assumptions assumptions) {
+            StructuredGraph graph = getGraph(invoke, concrete, runtime, callback);
+            assumptions.recordMethodContents(concrete);
             InliningUtil.inline(invoke, graph, true);
         }
 
@@ -158,23 +238,18 @@
         public String toString() {
             return "exact " + MetaUtil.format("%H.%n(%p):%r", concrete);
         }
-
-        @Override
-        public boolean canDeopt() {
-            return false;
-        }
     }
 
     /**
      * Represents an inlining opportunity for which profiling information suggests a monomorphic receiver, but for which
      * the receiver type cannot be proven. A type check guard will be generated if this inlining is performed.
      */
-    private static class TypeGuardInlineInfo extends InlineInfo {
+    private static class TypeGuardInlineInfo extends AbstractInlineInfo {
         public final ResolvedJavaMethod concrete;
         public final ResolvedJavaType type;
 
-        public TypeGuardInlineInfo(Invoke invoke, double weight, int level, ResolvedJavaMethod concrete, ResolvedJavaType type) {
-            super(invoke, weight, level);
+        public TypeGuardInlineInfo(Invoke invoke, double weight, ResolvedJavaMethod concrete, ResolvedJavaType type) {
+            super(invoke, weight);
             this.concrete = concrete;
             this.type = type;
         }
@@ -185,7 +260,7 @@
         }
 
         @Override
-        public void inline(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback) {
+        public void inline(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback, Assumptions assumptions) {
             // receiver null check must be before the type check
             InliningUtil.receiverNullCheck(invoke);
             ValueNode receiver = invoke.methodCallTarget().receiver();
@@ -203,9 +278,8 @@
             graph.addBeforeFixed(invoke.node(), guard);
             graph.addBeforeFixed(invoke.node(), anchor);
 
-            StructuredGraph calleeGraph = getGraph(concrete, callback);
-            assert !IntrinsificationPhase.canIntrinsify(invoke, concrete, runtime);
-            callback.recordMethodContentsAssumption(concrete);
+            StructuredGraph calleeGraph = getGraph(invoke, concrete, runtime, callback);
+            assumptions.recordMethodContents(concrete);
             InliningUtil.inline(invoke, calleeGraph, false);
         }
 
@@ -213,26 +287,21 @@
         public String toString() {
             return "type-checked " + MetaUtil.format("%H.%n(%p):%r", concrete);
         }
-
-        @Override
-        public boolean canDeopt() {
-            return true;
-        }
     }
 
     /**
      * Polymorphic inlining of m methods with n type checks (n >= m) when the profiling information suggests a reasonable
      * number of different receiver types and methods. If an unknown type is encountered, a deoptimization is triggered.
      */
-    private static class MultiTypeGuardInlineInfo extends InlineInfo {
+    private static class MultiTypeGuardInlineInfo extends AbstractInlineInfo {
         public final List<ResolvedJavaMethod> concretes;
         public final ProfiledType[] ptypes;
         public final int[] typesToConcretes;
         public final double notRecordedTypeProbability;
 
-        public MultiTypeGuardInlineInfo(Invoke invoke, double weight, int level, List<ResolvedJavaMethod> concretes, ProfiledType[] ptypes,
+        public MultiTypeGuardInlineInfo(Invoke invoke, double weight, List<ResolvedJavaMethod> concretes, ProfiledType[] ptypes,
                         int[] typesToConcretes, double notRecordedTypeProbability) {
-            super(invoke, weight, level);
+            super(invoke, weight);
             assert concretes.size() > 0 && concretes.size() <= ptypes.length : "must have at least one method but no more methods than types";
             assert ptypes.length == typesToConcretes.length : "array lengths must match";
 
@@ -252,16 +321,16 @@
         }
 
         @Override
-        public void inline(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback) {
+        public void inline(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback, Assumptions assumptions) {
             int numberOfMethods = concretes.size();
             boolean hasReturnValue = invoke.node().kind() != Kind.Void;
 
             // receiver null check must be the first node
             InliningUtil.receiverNullCheck(invoke);
             if (numberOfMethods > 1 || shouldFallbackToInvoke()) {
-                inlineMultipleMethods(graph, runtime, callback, numberOfMethods, hasReturnValue);
+                inlineMultipleMethods(graph, runtime, callback, assumptions, numberOfMethods, hasReturnValue);
             } else {
-                inlineSingleMethod(graph, runtime, callback);
+                inlineSingleMethod(graph, runtime, callback, assumptions);
             }
         }
 
@@ -269,7 +338,7 @@
             return notRecordedTypeProbability > 0;
         }
 
-        private void inlineMultipleMethods(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback, int numberOfMethods, boolean hasReturnValue) {
+        private void inlineMultipleMethods(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback, Assumptions assumptions, int numberOfMethods, boolean hasReturnValue) {
             FixedNode continuation = invoke.next();
 
             ValueNode originalReceiver = invoke.methodCallTarget().receiver();
@@ -300,27 +369,26 @@
             }
 
             // create one separate block for each invoked method
-            BeginNode[] calleeEntryNodes = new BeginNode[numberOfMethods];
+            BeginNode[] successors = new BeginNode[numberOfMethods + 1];
             for (int i = 0; i < numberOfMethods; i++) {
-                int predecessors = 0;
                 double probability = 0;
                 for (int j = 0; j < typesToConcretes.length; j++) {
                     if (typesToConcretes[j] == i) {
-                        predecessors++;
                         probability += ptypes[j].getProbability();
                     }
                 }
 
-                calleeEntryNodes[i] = createInvocationBlock(graph, invoke, returnMerge, returnValuePhi, exceptionMerge, exceptionObjectPhi, predecessors, invoke.probability() * probability, true);
+                successors[i] = createInvocationBlock(graph, invoke, returnMerge, returnValuePhi, exceptionMerge, exceptionObjectPhi, invoke.probability() * probability, true);
             }
 
             // create the successor for an unknown type
-            FixedNode unknownTypeNode;
+            FixedNode unknownTypeSux;
             if (shouldFallbackToInvoke()) {
-                unknownTypeNode = createInvocationBlock(graph, invoke, returnMerge, returnValuePhi, exceptionMerge, exceptionObjectPhi, 1, notRecordedTypeProbability, false);
+                unknownTypeSux = createInvocationBlock(graph, invoke, returnMerge, returnValuePhi, exceptionMerge, exceptionObjectPhi, notRecordedTypeProbability, false);
             } else {
-                unknownTypeNode = graph.add(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.TypeCheckedInliningViolated, invoke.leafGraphId()));
+                unknownTypeSux = graph.add(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.TypeCheckedInliningViolated, invoke.leafGraphId()));
             }
+            successors[successors.length - 1] = BeginNode.begin(unknownTypeSux);
 
             // replace the invoke exception edge
             if (invoke instanceof InvokeWithExceptionNode) {
@@ -332,11 +400,20 @@
                 GraphUtil.killCFG(invokeWithExceptionNode.exceptionEdge());
             }
 
+            // get all graphs and record assumptions
+            assert invoke.node().isAlive();
+            StructuredGraph[] calleeGraphs = new StructuredGraph[numberOfMethods];
+            for (int i = 0; i < numberOfMethods; i++) {
+                ResolvedJavaMethod concrete = concretes.get(i);
+                calleeGraphs[i] = getGraph(invoke, concrete, runtime, callback);
+                assumptions.recordMethodContents(concrete);
+            }
+
             // replace the invoke with a switch on the type of the actual receiver
             Kind hubKind = invoke.methodCallTarget().targetMethod().getDeclaringClass().getEncoding(Representation.ObjectHub).getKind();
             LoadHubNode receiverHub = graph.add(new LoadHubNode(invoke.methodCallTarget().receiver(), hubKind));
             graph.addBeforeFixed(invoke.node(), receiverHub);
-            FixedNode dispatchOnType = createDispatchOnType(graph, receiverHub, calleeEntryNodes, unknownTypeNode);
+            FixedNode dispatchOnType = createDispatchOnType(graph, receiverHub, successors);
 
             assert invoke.next() == continuation;
             invoke.setNext(null);
@@ -347,8 +424,8 @@
             ArrayList<PiNode> replacements = new ArrayList<>();
 
             // do the actual inlining for every invoke
-            for (int i = 0; i < calleeEntryNodes.length; i++) {
-                BeginNode node = calleeEntryNodes[i];
+            for (int i = 0; i < numberOfMethods; i++) {
+                BeginNode node = successors[i];
                 Invoke invokeForInlining = (Invoke) node.next();
 
                 ResolvedJavaType commonType = getLeastCommonType(i);
@@ -357,10 +434,7 @@
                 PiNode anchoredReceiver = createAnchoredReceiver(graph, node, commonType, receiver, exact);
                 invokeForInlining.callTarget().replaceFirstInput(receiver, anchoredReceiver);
 
-                ResolvedJavaMethod concrete = concretes.get(i);
-                StructuredGraph calleeGraph = getGraph(concrete, callback);
-                callback.recordMethodContentsAssumption(concrete);
-                assert !IntrinsificationPhase.canIntrinsify(invokeForInlining, concrete, runtime);
+                StructuredGraph calleeGraph = calleeGraphs[i];
                 InliningUtil.inline(invokeForInlining, calleeGraph, false);
                 replacements.add(anchoredReceiver);
             }
@@ -415,62 +489,54 @@
             return commonType;
         }
 
-        private void inlineSingleMethod(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback) {
+        private void inlineSingleMethod(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback, Assumptions assumptions) {
             assert concretes.size() == 1 && ptypes.length > 1 && !shouldFallbackToInvoke() && notRecordedTypeProbability == 0;
 
-            MergeNode calleeEntryNode = graph.add(new MergeNode());
+            BeginNode calleeEntryNode = graph.add(new BeginNode());
             calleeEntryNode.setProbability(invoke.probability());
             Kind hubKind = invoke.methodCallTarget().targetMethod().getDeclaringClass().getEncoding(Representation.ObjectHub).getKind();
             LoadHubNode receiverHub = graph.add(new LoadHubNode(invoke.methodCallTarget().receiver(), hubKind));
             graph.addBeforeFixed(invoke.node(), receiverHub);
 
-            FixedNode unknownTypeNode = graph.add(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.TypeCheckedInliningViolated, invoke.leafGraphId()));
-            FixedNode dispatchOnType = createDispatchOnType(graph, receiverHub, new BeginNode[] {calleeEntryNode}, unknownTypeNode);
+            BeginNode unknownTypeSux = BeginNode.begin(graph.add(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.TypeCheckedInliningViolated, invoke.leafGraphId())));
+            BeginNode[] successors = new BeginNode[] {calleeEntryNode, unknownTypeSux};
+            FixedNode dispatchOnType = createDispatchOnType(graph, receiverHub, successors);
 
             FixedWithNextNode pred = (FixedWithNextNode) invoke.node().predecessor();
             pred.setNext(dispatchOnType);
             calleeEntryNode.setNext(invoke.node());
 
             ResolvedJavaMethod concrete = concretes.get(0);
-            StructuredGraph calleeGraph = getGraph(concrete, callback);
-            assert !IntrinsificationPhase.canIntrinsify(invoke, concrete, runtime);
-            callback.recordMethodContentsAssumption(concrete);
+            StructuredGraph calleeGraph = getGraph(invoke, concrete, runtime, callback);
+            assumptions.recordMethodContents(concrete);
             InliningUtil.inline(invoke, calleeGraph, false);
         }
 
-        private FixedNode createDispatchOnType(StructuredGraph graph, LoadHubNode hub, BeginNode[] calleeEntryNodes, FixedNode unknownTypeSux) {
+        private FixedNode createDispatchOnType(StructuredGraph graph, LoadHubNode hub, BeginNode[] successors) {
             assert ptypes.length > 1;
 
-            ResolvedJavaType[] types = new ResolvedJavaType[ptypes.length];
-            double[] probabilities = new double[ptypes.length + 1];
-            BeginNode[] successors = new BeginNode[ptypes.length + 1];
+            ResolvedJavaType[] keys = new ResolvedJavaType[ptypes.length];
+            double[] keyProbabilities = new double[ptypes.length + 1];
             int[] keySuccessors = new int[ptypes.length + 1];
             for (int i = 0; i < ptypes.length; i++) {
-                types[i] = ptypes[i].getType();
-                probabilities[i] = ptypes[i].getProbability();
-                FixedNode entry = calleeEntryNodes[typesToConcretes[i]];
-                if (entry instanceof MergeNode) {
-                    EndNode endNode = graph.add(new EndNode());
-                    ((MergeNode) entry).addForwardEnd(endNode);
-                    entry = endNode;
-                }
-                successors[i] = BeginNode.begin(entry);
-                keySuccessors[i] = i;
+                keys[i] = ptypes[i].getType();
+                keyProbabilities[i] = ptypes[i].getProbability();
+                keySuccessors[i] = typesToConcretes[i];
+                assert keySuccessors[i] < successors.length - 1 : "last successor is the unknownTypeSux";
             }
-            assert !(unknownTypeSux instanceof MergeNode);
-            successors[successors.length - 1] = BeginNode.begin(unknownTypeSux);
-            probabilities[successors.length - 1] = notRecordedTypeProbability;
-            keySuccessors[successors.length - 1] = successors.length - 1;
+            keyProbabilities[keyProbabilities.length - 1] = notRecordedTypeProbability;
+            keySuccessors[keySuccessors.length - 1] = successors.length - 1;
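+            // Worked example (illustrative): ptypes [A: 0.6, B: 0.3] mapping to concretes [0, 1]
+            // yield keySuccessors = [0, 1, 2] and keyProbabilities = [0.6, 0.3, 0.1], where the
+            // last slot routes the not-recorded probability to the unknown-type successor.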
 
-            TypeSwitchNode typeSwitch = graph.add(new TypeSwitchNode(hub, successors, probabilities, types, probabilities, keySuccessors));
+            double[] successorProbabilities = SwitchNode.successorProbabilites(successors.length, keySuccessors, keyProbabilities);
+            TypeSwitchNode typeSwitch = graph.add(new TypeSwitchNode(hub, successors, successorProbabilities, keys, keyProbabilities, keySuccessors));
 
             return typeSwitch;
         }
 
         private static BeginNode createInvocationBlock(StructuredGraph graph, Invoke invoke, MergeNode returnMerge, PhiNode returnValuePhi,
-                        MergeNode exceptionMerge, PhiNode exceptionObjectPhi, int predecessors, double probability, boolean useForInlining) {
+                        MergeNode exceptionMerge, PhiNode exceptionObjectPhi, double probability, boolean useForInlining) {
             Invoke duplicatedInvoke = duplicateInvokeForInlining(graph, invoke, exceptionMerge, exceptionObjectPhi, useForInlining, probability);
-            BeginNode calleeEntryNode = graph.add(predecessors > 1 ? new MergeNode() : new BeginNode());
+            BeginNode calleeEntryNode = graph.add(new BeginNode());
             calleeEntryNode.setNext(duplicatedInvoke.node());
             calleeEntryNode.setProbability(probability);
 
@@ -534,11 +600,6 @@
             }
             return builder.toString();
         }
-
-        @Override
-        public boolean canDeopt() {
-            return true;
-        }
     }
 
 
@@ -549,74 +610,61 @@
     private static class AssumptionInlineInfo extends ExactInlineInfo {
         public final ResolvedJavaType context;
 
-        public AssumptionInlineInfo(Invoke invoke, double weight, int level, ResolvedJavaType context, ResolvedJavaMethod concrete) {
-            super(invoke, weight, level, concrete);
+        public AssumptionInlineInfo(Invoke invoke, double weight, ResolvedJavaType context, ResolvedJavaMethod concrete) {
+            super(invoke, weight, concrete);
             this.context = context;
         }
 
         @Override
-        public void inline(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback) {
+        public void inline(StructuredGraph graph, GraalCodeCacheProvider runtime, InliningCallback callback, Assumptions assumptions) {
             if (Debug.isLogEnabled()) {
                 String targetName = MetaUtil.format("%H.%n(%p):%r", invoke.methodCallTarget().targetMethod());
                 String concreteName = MetaUtil.format("%H.%n(%p):%r", concrete);
                 Debug.log("recording concrete method assumption: %s on receiver type %s -> %s", targetName, context, concreteName);
             }
-            callback.recordConcreteMethodAssumption(invoke.methodCallTarget().targetMethod(), context, concrete);
+            assumptions.recordConcreteMethod(invoke.methodCallTarget().targetMethod(), context, concrete);
 
-            super.inline(graph, runtime, callback);
+            super.inline(graph, runtime, callback, assumptions);
         }
 
         @Override
         public String toString() {
             return "assumption " + MetaUtil.format("%H.%n(%p):%r", concrete);
         }
-
-        @Override
-        public boolean canDeopt() {
-            return true;
-        }
     }
 
     /**
      * Determines if inlining is possible at the given invoke node.
      * @param invoke the invoke that should be inlined
-     * @param level the number of nested inlinings that lead to this invoke, or 0 if the invoke was part of the initial graph
     * @param runtime a GraalRuntime instance used to determine whether the invoke can be inlined and/or should be intrinsified
-     * @param callback a callback that is used to determine the weight of a specific inlining
+     * @param inliningPolicy used to determine the weight of a specific inlining
      * @return an instance of InlineInfo, or null if no inlining is possible at the given invoke
      */
-    public static InlineInfo getInlineInfo(Invoke invoke, int level, GraalCodeCacheProvider runtime, Assumptions assumptions, InliningCallback callback, OptimisticOptimizations optimisticOpts) {
-        if (!(invoke.callTarget() instanceof MethodCallTargetNode)) {
-            // The invoke has already been lowered , or has been created as a low-level node. We have no method information.
-            return null;
-        }
-        ResolvedJavaMethod parent = invoke.stateAfter().method();
-        MethodCallTargetNode callTarget = invoke.methodCallTarget();
-        ResolvedJavaMethod targetMethod = callTarget.targetMethod();
-        if (targetMethod == null) {
-            return null;
-        }
+    public static InlineInfo getInlineInfo(Invoke invoke, GraalCodeCacheProvider runtime, Assumptions assumptions, InliningPolicy inliningPolicy, OptimisticOptimizations optimisticOpts) {
         if (!checkInvokeConditions(invoke)) {
             return null;
         }
+        ResolvedJavaMethod caller = getCaller(invoke);
+        MethodCallTargetNode callTarget = invoke.methodCallTarget();
+        ResolvedJavaMethod targetMethod = callTarget.targetMethod();
 
         if (callTarget.invokeKind() == InvokeKind.Special || targetMethod.canBeStaticallyBound()) {
-            if (checkTargetConditions(invoke, targetMethod, optimisticOpts)) {
-                double weight = callback == null ? 0 : callback.inliningWeight(parent, targetMethod, invoke);
-                return new ExactInlineInfo(invoke, weight, level, targetMethod);
+            if (!checkTargetConditions(invoke, targetMethod, optimisticOpts, runtime)) {
+                return null;
             }
-            return null;
+            double weight = inliningPolicy.inliningWeight(caller, targetMethod, invoke);
+            return new ExactInlineInfo(invoke, weight, targetMethod);
         }
         ObjectStamp receiverStamp = callTarget.receiver().objectStamp();
         ResolvedJavaType receiverType = receiverStamp.type();
         if (receiverStamp.isExactType()) {
             assert receiverType.isSubtypeOf(targetMethod.getDeclaringClass()) : receiverType + " subtype of " + targetMethod.getDeclaringClass() + " for " + targetMethod;
             ResolvedJavaMethod resolved = receiverType.resolveMethod(targetMethod);
-            if (checkTargetConditions(invoke, resolved, optimisticOpts)) {
-                double weight = callback == null ? 0 : callback.inliningWeight(parent, resolved, invoke);
-                return new ExactInlineInfo(invoke, weight, level, resolved);
+            if (!checkTargetConditions(invoke, resolved, optimisticOpts, runtime)) {
+                return null;
             }
-            return null;
+            double weight = inliningPolicy.inliningWeight(caller, resolved, invoke);
+            return new ExactInlineInfo(invoke, weight, resolved);
         }
         ResolvedJavaType holder = targetMethod.getDeclaringClass();
 
@@ -628,162 +676,146 @@
             }
         }
         // TODO (thomaswue) fix this
-        if (assumptions != null) {
+        if (assumptions.useOptimisticAssumptions()) {
             ResolvedJavaMethod concrete = holder.findUniqueConcreteMethod(targetMethod);
             if (concrete != null) {
-                if (checkTargetConditions(invoke, concrete, optimisticOpts)) {
-                    double weight = callback == null ? 0 : callback.inliningWeight(parent, concrete, invoke);
-                    return new AssumptionInlineInfo(invoke, weight, level, holder, concrete);
+                if (!checkTargetConditions(invoke, concrete, optimisticOpts, runtime)) {
+                    return null;
                 }
-                return null;
+                double weight = inliningPolicy.inliningWeight(caller, concrete, invoke);
+                return new AssumptionInlineInfo(invoke, weight, holder, concrete);
             }
         }
 
         // type check based inlining
-        return getTypeCheckedInlineInfo(invoke, level, callback, parent, targetMethod, optimisticOpts);
+        return getTypeCheckedInlineInfo(invoke, inliningPolicy, caller, targetMethod, optimisticOpts, runtime);
     }
 
-    private static InlineInfo getTypeCheckedInlineInfo(Invoke invoke, int level, InliningCallback callback, ResolvedJavaMethod parent, ResolvedJavaMethod targetMethod, OptimisticOptimizations optimisticOpts) {
-        ProfilingInfo profilingInfo = parent.getProfilingInfo();
+    private static InlineInfo getTypeCheckedInlineInfo(Invoke invoke, InliningPolicy inliningPolicy, ResolvedJavaMethod caller,
+                    ResolvedJavaMethod targetMethod, OptimisticOptimizations optimisticOpts, GraalCodeCacheProvider runtime) {
+        ProfilingInfo profilingInfo = caller.getProfilingInfo();
         JavaTypeProfile typeProfile = profilingInfo.getTypeProfile(invoke.bci());
-        if (typeProfile != null) {
-            ProfiledType[] ptypes = typeProfile.getTypes();
-
-            if (ptypes != null && ptypes.length > 0) {
-                double notRecordedTypeProbability = typeProfile.getNotRecordedProbability();
-                if (ptypes.length == 1 && notRecordedTypeProbability == 0) {
-                    if (optimisticOpts.inlineMonomorphicCalls()) {
-                        ResolvedJavaType type = ptypes[0].getType();
-                        ResolvedJavaMethod concrete = type.resolveMethod(targetMethod);
-                        if (checkTargetConditions(invoke, concrete, optimisticOpts)) {
-                            double weight = callback == null ? 0 : callback.inliningWeight(parent, concrete, invoke);
-                            return new TypeGuardInlineInfo(invoke, weight, level, concrete, type);
-                        }
-
-                        Debug.log("not inlining %s because method can't be inlined", methodName(targetMethod, invoke));
-                        return null;
-                    } else {
-                        Debug.log("not inlining %s because GraalOptions.InlineMonomorphicCalls == false", methodName(targetMethod, invoke));
-                        return null;
-                    }
-                } else {
-                    invoke.setPolymorphic(true);
-                    if (optimisticOpts.inlinePolymorphicCalls() && notRecordedTypeProbability == 0 || optimisticOpts.inlineMegamorphicCalls() && notRecordedTypeProbability > 0) {
-                        // TODO (chaeubl) inlining of multiple methods should work differently
-                        // 1. check which methods can be inlined
-                        // 2. for those methods, use weight and probability to compute which of them should be inlined
-                        // 3. do the inlining
-                        //    a) all seen methods can be inlined -> do so and guard with deopt
-                        //    b) some methods can be inlined -> inline them and fall back to invocation if violated
-                        // TODO (chaeubl) sort types by probability
+        if (typeProfile == null) {
+            return logNotInlinedMethodAndReturnNull(invoke, targetMethod, "no type profile exists");
+        }
 
-                        // determine concrete methods and map type to specific method
-                        ArrayList<ResolvedJavaMethod> concreteMethods = new ArrayList<>();
-                        int[] typesToConcretes = new int[ptypes.length];
-                        for (int i = 0; i < ptypes.length; i++) {
-                            ResolvedJavaMethod concrete = ptypes[i].getType().resolveMethod(targetMethod);
-
-                            int index = concreteMethods.indexOf(concrete);
-                            if (index < 0) {
-                                index = concreteMethods.size();
-                                concreteMethods.add(concrete);
-                            }
-                            typesToConcretes[i] = index;
-                        }
+        ProfiledType[] ptypes = typeProfile.getTypes();
+        if (ptypes == null || ptypes.length <= 0) {
+            return logNotInlinedMethodAndReturnNull(invoke, targetMethod, "no types/probabilities were recorded");
+        }
 
-                        double totalWeight = 0;
-                        boolean canInline = true;
-                        for (ResolvedJavaMethod concrete: concreteMethods) {
-                            if (!checkTargetConditions(invoke, concrete, optimisticOpts)) {
-                                canInline = false;
-                                break;
-                            }
-                            totalWeight += callback == null ? 0 : callback.inliningWeight(parent, concrete, invoke);
-                        }
-
-                        if (canInline) {
-                            return new MultiTypeGuardInlineInfo(invoke, totalWeight, level, concreteMethods, ptypes, typesToConcretes, notRecordedTypeProbability);
-                        } else {
-                            Debug.log("not inlining %s because it is a polymorphic method call and at least one invoked method cannot be inlined", methodName(targetMethod, invoke));
-                            return null;
-                        }
-                    } else {
-                        if (!optimisticOpts.inlinePolymorphicCalls() && notRecordedTypeProbability == 0) {
-                            Debug.log("not inlining %s because GraalOptions.InlinePolymorphicCalls == false", methodName(targetMethod, invoke));
-                        } else {
-                            Debug.log("not inlining %s because GraalOptions.InlineMegamorphicCalls == false", methodName(targetMethod, invoke));
-                        }
-                        return null;
-                    }
-                }
+        double notRecordedTypeProbability = typeProfile.getNotRecordedProbability();
+        if (ptypes.length == 1 && notRecordedTypeProbability == 0) {
+            if (!optimisticOpts.inlineMonomorphicCalls()) {
+                return logNotInlinedMethodAndReturnNull(invoke, targetMethod, "inlining monomorphic calls is disabled");
             }
 
-            Debug.log("not inlining %s because no types/probabilities were recorded", methodName(targetMethod, invoke));
-            return null;
+            ResolvedJavaType type = ptypes[0].getType();
+            ResolvedJavaMethod concrete = type.resolveMethod(targetMethod);
+            if (!checkTargetConditions(invoke, concrete, optimisticOpts, runtime)) {
+                return null;
+            }
+            double weight = inliningPolicy.inliningWeight(caller, concrete, invoke);
+            return new TypeGuardInlineInfo(invoke, weight, concrete, type);
         } else {
-            Debug.log("not inlining %s because no type profile exists", methodName(targetMethod, invoke));
-            return null;
+            invoke.setPolymorphic(true);
+
+            if (!optimisticOpts.inlinePolymorphicCalls() && notRecordedTypeProbability == 0) {
+                return logNotInlinedMethodAndReturnNull(invoke, targetMethod, "inlining polymorphic calls is disabled");
+            }
+            if (!optimisticOpts.inlineMegamorphicCalls() && notRecordedTypeProbability > 0) {
+                return logNotInlinedMethodAndReturnNull(invoke, targetMethod, "inlining megamorphic calls is disabled");
+            }
+
+            // TODO (chaeubl) inlining of multiple methods should work differently
+            // 1. check which methods can be inlined
+            // 2. for those methods, use weight and probability to compute which of them should be inlined
+            // 3. do the inlining
+            //    a) all seen methods can be inlined -> do so and guard with deopt
+            //    b) some methods can be inlined -> inline them and fall back to invocation if violated
+
+            // determine concrete methods and map type to specific method
+            ArrayList<ResolvedJavaMethod> concreteMethods = new ArrayList<>();
+            int[] typesToConcretes = new int[ptypes.length];
+            for (int i = 0; i < ptypes.length; i++) {
+                ResolvedJavaMethod concrete = ptypes[i].getType().resolveMethod(targetMethod);
+
+                int index = concreteMethods.indexOf(concrete);
+                if (index < 0) {
+                    index = concreteMethods.size();
+                    concreteMethods.add(concrete);
+                }
+                typesToConcretes[i] = index;
+            }
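+            // e.g. (illustrative): profiled types [A, B, C] resolving to [m1, m1, m2]
+            // give concreteMethods = [m1, m2] and typesToConcretes = [0, 0, 1]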
+
+            double totalWeight = 0;
+            for (ResolvedJavaMethod concrete: concreteMethods) {
+                if (!checkTargetConditions(invoke, concrete, optimisticOpts, runtime)) {
+                    return logNotInlinedMethodAndReturnNull(invoke, targetMethod, "it is a polymorphic method call and at least one invoked method cannot be inlined");
+                }
+                totalWeight += inliningPolicy.inliningWeight(caller, concrete, invoke);
+            }
+            return new MultiTypeGuardInlineInfo(invoke, totalWeight, concreteMethods, ptypes, typesToConcretes, notRecordedTypeProbability);
         }
     }
 
+    private static ResolvedJavaMethod getCaller(Invoke invoke) {
+        return invoke.stateAfter().method();
+    }
+
     private static PiNode createAnchoredReceiver(StructuredGraph graph, FixedNode anchor, ResolvedJavaType commonType, ValueNode receiver, boolean exact) {
         // prevents floating reads on receiver fields from floating above the type check
         return graph.unique(new PiNode(receiver, anchor, exact ? StampFactory.exactNonNull(commonType) : StampFactory.declaredNonNull(commonType)));
     }
 
     private static boolean checkInvokeConditions(Invoke invoke) {
-        if (invoke.stateAfter() == null) {
-            Debug.log("not inlining %s because the invoke has no after state", methodName(invoke.methodCallTarget().targetMethod(), invoke));
-            return false;
+        if (!(invoke.callTarget() instanceof MethodCallTargetNode)) {
+            return logNotInlinedMethodAndReturnFalse(invoke, "the invoke has already been lowered, or has been created as a low-level node");
+        } else if (invoke.methodCallTarget().targetMethod() == null) {
+            return logNotInlinedMethodAndReturnFalse(invoke, "target method is null");
+        } else if (invoke.stateAfter() == null) {
+            return logNotInlinedMethodAndReturnFalse(invoke, "the invoke has no after state");
+        } else if (invoke.predecessor() == null || !invoke.node().isAlive()) {
+            return logNotInlinedMethodAndReturnFalse(invoke, "the invoke is dead code");
+        } else if (!invoke.useForInlining()) {
+            return logNotInlinedMethodAndReturnFalse(invoke, "the invoke is marked not to be used for inlining");
+        } else {
+            return true;
         }
-        if (invoke.predecessor() == null) {
-            Debug.log("not inlining %s because the invoke is dead code", methodName(invoke.methodCallTarget().targetMethod(), invoke));
-            return false;
-        }
-        if (!invoke.useForInlining()) {
-            Debug.log("not inlining %s because invoke is marked to be not used for inlining", methodName(invoke.methodCallTarget().targetMethod(), invoke));
-            return false;
-        }
-        return true;
     }
 
-    private static boolean checkTargetConditions(Invoke invoke, JavaMethod method, OptimisticOptimizations optimisticOpts) {
+    private static boolean checkTargetConditions(Invoke invoke, ResolvedJavaMethod method, OptimisticOptimizations optimisticOpts, GraalCodeCacheProvider runtime) {
         if (method == null) {
-            Debug.log("not inlining because method is not resolved");
-            return false;
-        }
-        if (!(method instanceof ResolvedJavaMethod)) {
-            Debug.log("not inlining %s because it is unresolved", method.toString());
-            return false;
-        }
-        ResolvedJavaMethod resolvedMethod = (ResolvedJavaMethod) method;
-        if (Modifier.isNative(resolvedMethod.getModifiers())) {
-            Debug.log("not inlining %s because it is a native method", methodName(resolvedMethod, invoke));
-            return false;
-        }
-        if (Modifier.isAbstract(resolvedMethod.getModifiers())) {
-            Debug.log("not inlining %s because it is an abstract method", methodName(resolvedMethod, invoke));
-            return false;
+            return logNotInlinedMethodAndReturnFalse(invoke, method, "the method is not resolved");
+        } else if (Modifier.isNative(method.getModifiers()) && (!GraalOptions.Intrinsify || !InliningUtil.canIntrinsify(invoke, method, runtime))) {
+            return logNotInlinedMethodAndReturnFalse(invoke, method, "it is a non-intrinsic native method");
+        } else if (Modifier.isAbstract(method.getModifiers())) {
+            return logNotInlinedMethodAndReturnFalse(invoke, method, "it is an abstract method");
+        } else if (!method.getDeclaringClass().isInitialized()) {
+            return logNotInlinedMethodAndReturnFalse(invoke, method, "the method's class is not initialized");
+        } else if (!method.canBeInlined()) {
+            return logNotInlinedMethodAndReturnFalse(invoke, method, "it is marked non-inlinable");
+        } else if (computeInliningLevel(invoke) > GraalOptions.MaximumInlineLevel) {
+            return logNotInlinedMethodAndReturnFalse(invoke, method, "it exceeds the maximum inlining depth");
+        } else if (computeRecursiveInliningLevel(invoke.stateAfter(), method) > GraalOptions.MaximumRecursiveInlining) {
+            return logNotInlinedMethodAndReturnFalse(invoke, method, "it exceeds the maximum recursive inlining depth");
+        } else if (new OptimisticOptimizations(method).lessOptimisticThan(optimisticOpts)) {
+            return logNotInlinedMethodAndReturnFalse(invoke, "the callee uses less optimistic optimizations than the caller");
+        } else {
+            return true;
         }
-        if (!resolvedMethod.getDeclaringClass().isInitialized()) {
-            Debug.log("not inlining %s because of non-initialized class", methodName(resolvedMethod, invoke));
-            return false;
-        }
-        if (!resolvedMethod.canBeInlined()) {
-            Debug.log("not inlining %s because it is marked non-inlinable", methodName(resolvedMethod, invoke));
-            return false;
+    }
+
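+    // The level is the number of enclosing (outer) frame states: 0 for an invoke in the
+    // initial graph, 1 for an invoke inlined into it, and so on.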
+    private static int computeInliningLevel(Invoke invoke) {
+        int count = -1;
+        FrameState curState = invoke.stateAfter();
+        while (curState != null) {
+            count++;
+            curState = curState.outerFrameState();
         }
-        if (computeRecursiveInliningLevel(invoke.stateAfter(), (ResolvedJavaMethod) method) > GraalOptions.MaximumRecursiveInlining) {
-            Debug.log("not inlining %s because it exceeds the maximum recursive inlining depth", methodName(resolvedMethod, invoke));
-            return false;
-        }
-        OptimisticOptimizations calleeOpts = new OptimisticOptimizations(resolvedMethod);
-        if (calleeOpts.lessOptimisticThan(optimisticOpts)) {
-            Debug.log("not inlining %s because callee uses less optimistic optimizations than caller", methodName(resolvedMethod, invoke));
-            return false;
-        }
-
-        return true;
+        return count;
     }
 
     private static int computeRecursiveInliningLevel(FrameState state, ResolvedJavaMethod method) {
@@ -950,4 +982,19 @@
             graph.addBeforeFixed(invoke.node(), graph.add(new FixedGuardNode(graph.unique(new IsNullNode(firstParam)), DeoptimizationReason.NullCheckException, DeoptimizationAction.InvalidateReprofile, true, invoke.leafGraphId())));
         }
     }
+
+    public static boolean canIntrinsify(Invoke invoke, ResolvedJavaMethod target, GraalCodeCacheProvider runtime) {
+        return getIntrinsicGraph(invoke, target, runtime) != null;
+    }
+
+    private static StructuredGraph getIntrinsicGraph(Invoke invoke, ResolvedJavaMethod target, GraalCodeCacheProvider runtime) {
+        assert invoke.node().isAlive();
+
+        StructuredGraph intrinsicGraph = (StructuredGraph) target.getCompilerStorage().get(Graph.class);
+        if (intrinsicGraph == null) {
+            // TODO remove once all intrinsics are available via compilerStorage
+            intrinsicGraph = runtime.intrinsicGraph(invoke.stateAfter().method(), invoke.bci(), target, invoke.callTarget().arguments());
+        }
+        return intrinsicGraph;
+    }
 }
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/IntrinsificationPhase.java	Mon Nov 26 19:30:54 2012 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.phases.common;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.java.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.graal.phases.*;
-
-public class IntrinsificationPhase extends Phase {
-
-    private final GraalCodeCacheProvider runtime;
-
-    public IntrinsificationPhase(GraalCodeCacheProvider runtime) {
-        this.runtime = runtime;
-    }
-
-    @Override
-    protected void run(StructuredGraph graph) {
-        for (InvokeNode invoke : graph.getNodes(InvokeNode.class)) {
-            tryIntrinsify(invoke, runtime);
-        }
-        for (InvokeWithExceptionNode invoke : graph.getNodes(InvokeWithExceptionNode.class)) {
-            tryIntrinsify(invoke, runtime);
-        }
-    }
-
-    public static boolean canIntrinsify(Invoke invoke, ResolvedJavaMethod target, GraalCodeCacheProvider runtime) {
-        return getIntrinsicGraph(invoke, target, runtime) != null;
-    }
-
-    private static void tryIntrinsify(Invoke invoke, GraalCodeCacheProvider runtime) {
-        if (invoke.callTarget() instanceof MethodCallTargetNode && invoke.methodCallTarget().targetMethod() != null) {
-            tryIntrinsify(invoke, invoke.methodCallTarget().targetMethod(), runtime);
-        }
-    }
-
-    private static void tryIntrinsify(Invoke invoke, ResolvedJavaMethod target, GraalCodeCacheProvider runtime) {
-        StructuredGraph intrinsicGraph = getIntrinsicGraph(invoke, target, runtime);
-        if (intrinsicGraph != null) {
-            Debug.log(" > Intrinsify %s", target);
-            InliningUtil.inline(invoke, intrinsicGraph, true);
-        }
-    }
-
-    private static StructuredGraph getIntrinsicGraph(Invoke invoke, ResolvedJavaMethod target, GraalCodeCacheProvider runtime) {
-        StructuredGraph intrinsicGraph = (StructuredGraph) target.getCompilerStorage().get(Graph.class);
-        if (intrinsicGraph == null && runtime != null) {
-            // TODO remove once all intrinsics are available via compilerStorage
-            intrinsicGraph = runtime.intrinsicGraph(invoke.stateAfter().method(), invoke.bci(), target, invoke.callTarget().arguments());
-        }
-        return intrinsicGraph;
-    }
-}
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java	Tue Nov 27 12:12:02 2012 +0100
@@ -43,7 +43,8 @@
            static boolean InlineMonomorphicCalls             = true;
            static boolean InlinePolymorphicCalls             = true;
            static boolean InlineMegamorphicCalls             = ____;
-    public static int     InliningPolicy                     = 4;
+    public static int     InliningPolicy                     = 0;
+    public static int     InliningDecision                   = 4;
     public static int     WeightComputationPolicy            = 2;
     public static int     MaximumTrivialSize                 = 10;
     public static int     MaximumInlineLevel                 = 30;
@@ -172,7 +173,7 @@
     public static boolean ResolveClassBeforeStaticInvoke     = true;
 
     // Translating tableswitch instructions
-    public static int     SequentialSwitchLimit              = 4;
+    public static int     MinimumJumpTableSize               = 5;
     public static int     RangeTestsSwitchDensity            = 5;
     public static double  MinTableSwitchDensity              = 0.5;
 
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/OptimisticOptimizations.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/OptimisticOptimizations.java	Tue Nov 27 12:12:02 2012 +0100
@@ -59,19 +59,23 @@
         if (checkDeoptimizations(profilingInfo, DeoptimizationReason.NotCompiledExceptionHandler)) {
             enabledOpts.add(Optimization.UseExceptionProbability);
         }
+
+        log(method);
     }
 
     private OptimisticOptimizations(Set<Optimization> enabledOpts) {
         this.enabledOpts = enabledOpts;
     }
 
-    public void log(JavaMethod method) {
-        for (Optimization opt: Optimization.values()) {
-            if (!enabledOpts.contains(opt)) {
-                if (GraalOptions.PrintDisabledOptimisticOptimizations) {
-                    TTY.println("WARN: deactivated optimistic optimization %s for %s", opt.name(), MetaUtil.format("%H.%n(%p)", method));
+    private void log(ResolvedJavaMethod method) {
+        if (Debug.isLogEnabled()) {
+            for (Optimization opt: Optimization.values()) {
+                if (!enabledOpts.contains(opt)) {
+                    if (GraalOptions.PrintDisabledOptimisticOptimizations) {
+                        Debug.log("WARN: deactivated optimistic optimization %s for %s", opt.name(), MetaUtil.format("%H.%n(%p)", method));
+                    }
+                    disabledOptimisticOptsMetric.increment();
                 }
-                disabledOptimisticOptsMetric.increment();
             }
         }
     }
--- a/graal/com.oracle.graal.snippets.test/src/com/oracle/graal/snippets/WordTest.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.snippets.test/src/com/oracle/graal/snippets/WordTest.java	Tue Nov 27 12:12:02 2012 +0100
@@ -32,7 +32,7 @@
 import com.oracle.graal.compiler.*;
 import com.oracle.graal.compiler.test.*;
 import com.oracle.graal.nodes.*;
-import com.oracle.graal.snippets.Snippet.InliningPolicy;
+import com.oracle.graal.snippets.Snippet.SnippetInliningPolicy;
 
 /**
  * Tests for the {@link Word} type.
@@ -43,10 +43,10 @@
 
     public WordTest() {
         TargetDescription target = Graal.getRequiredCapability(GraalCompiler.class).target;
-        installer = new SnippetInstaller(runtime, target);
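+        // new Assumptions(false): snippet graphs are built without optimistic assumptions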
+        installer = new SnippetInstaller(runtime, target, new Assumptions(false));
     }
 
-    private static final ThreadLocal<InliningPolicy> inliningPolicy = new ThreadLocal<>();
+    private static final ThreadLocal<SnippetInliningPolicy> inliningPolicy = new ThreadLocal<>();
 
     @Override
     protected StructuredGraph parse(Method m) {
@@ -114,9 +114,9 @@
 
     @Test
     public void test_fromObject() {
-        inliningPolicy.set(new InliningPolicy() {
+        inliningPolicy.set(new SnippetInliningPolicy() {
             public boolean shouldInline(ResolvedJavaMethod method, ResolvedJavaMethod caller) {
-                return InliningPolicy.Default.shouldInline(method, caller) && !method.getName().equals("hashCode");
+                return SnippetInliningPolicy.Default.shouldInline(method, caller) && !method.getName().equals("hashCode");
             }
         });
         test("fromToObject", "object1", "object2");
--- a/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/InstanceOfSnippetsTemplates.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/InstanceOfSnippetsTemplates.java	Tue Nov 27 12:12:02 2012 +0100
@@ -55,8 +55,8 @@
  */
 public abstract class InstanceOfSnippetsTemplates<T extends SnippetsInterface> extends AbstractTemplates<T> {
 
-    public InstanceOfSnippetsTemplates(MetaAccessProvider runtime, TargetDescription target, Class<T> snippetsClass) {
-        super(runtime, target, snippetsClass);
+    public InstanceOfSnippetsTemplates(MetaAccessProvider runtime, Assumptions assumptions, TargetDescription target, Class<T> snippetsClass) {
+        super(runtime, assumptions, target, snippetsClass);
     }
 
     /**
@@ -92,7 +92,7 @@
                 replacer.replaceUsingInstantiation();
             } else {
                 KeyAndArguments keyAndArguments = getKeyAndArguments(replacer, tool);
-                SnippetTemplate template = cache.get(keyAndArguments.key);
+                SnippetTemplate template = cache.get(keyAndArguments.key, assumptions);
                 template.instantiate(runtime, instanceOf, replacer, tool.lastFixedNode(), keyAndArguments.arguments);
             }
         }
--- a/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/Snippet.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/Snippet.java	Tue Nov 27 12:12:02 2012 +0100
@@ -46,14 +46,14 @@
 
     /**
      * Specifies the class defining the inlining policy for this snippet.
-     * A {@linkplain InliningPolicy#Default default} policy is used if none is supplied.
+     * A {@linkplain SnippetInliningPolicy#Default default} policy is used if none is supplied.
      */
-    Class<? extends InliningPolicy> inlining() default InliningPolicy.class;
+    Class<? extends SnippetInliningPolicy> inlining() default SnippetInliningPolicy.class;
 
     /**
      * Guides inlining decisions used when installing a snippet.
      */
-    public interface InliningPolicy {
+    public interface SnippetInliningPolicy {
         /**
          * Determines if {@code method} should be inlined into {@code caller}.
          */
@@ -69,7 +69,7 @@
          * <li>constructors of {@link Throwable} classes</li>
          * </ul>
          */
-        InliningPolicy Default = new InliningPolicy() {
+        SnippetInliningPolicy Default = new SnippetInliningPolicy() {
             public boolean shouldInline(ResolvedJavaMethod method, ResolvedJavaMethod caller) {
                 if (Modifier.isNative(method.getModifiers())) {
                     return false;
--- a/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/SnippetInstaller.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/SnippetInstaller.java	Tue Nov 27 12:12:02 2012 +0100
@@ -1,211 +1,213 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.snippets;
-
-import java.lang.reflect.*;
-import java.util.*;
-import java.util.concurrent.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.graph.Node.NodeIntrinsic;
-import com.oracle.graal.java.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.extended.*;
-import com.oracle.graal.nodes.java.*;
-import com.oracle.graal.phases.*;
-import com.oracle.graal.phases.common.*;
-import com.oracle.graal.snippets.Snippet.InliningPolicy;
-
-/**
- * Utility for snippet {@linkplain #install(Class) installation}.
- */
-public class SnippetInstaller {
-
-    private final MetaAccessProvider runtime;
-    private final TargetDescription target;
-    private final BoxingMethodPool pool;
-
-    /**
-     * A graph cache used by this installer to avoid using the compiler
-     * storage for each method processed during snippet installation.
-     * Without this cache, all processed methods would be determined to be
-     * {@linkplain IntrinsificationPhase#canIntrinsify intrinsifiable}.
-     */
-    private final Map<ResolvedJavaMethod, StructuredGraph> graphCache;
-
-    public SnippetInstaller(MetaAccessProvider runtime, TargetDescription target) {
-        this.runtime = runtime;
-        this.target = target;
-        this.pool = new BoxingMethodPool(runtime);
-        this.graphCache = new HashMap<>();
-    }
-
-    /**
-     * Finds all the snippet methods in a given class, builds a graph for each of
-     * them, and installs that graph under the key {@code Graph.class} in the
-     * {@linkplain ResolvedJavaMethod#getCompilerStorage() compiler storage} of its method.
-     * <p>
-     * If {@code snippetsHolder} is annotated with {@link ClassSubstitution}, then all
-     * methods in the class are snippets. Otherwise, the snippets are those methods
-     * annotated with {@link Snippet}.
-     */
-    public void install(Class<? extends SnippetsInterface> snippetsHolder) {
-        if (snippetsHolder.isAnnotationPresent(ClassSubstitution.class)) {
-            installSubstitutions(snippetsHolder, snippetsHolder.getAnnotation(ClassSubstitution.class).value());
-        } else {
-            installSnippets(snippetsHolder);
-        }
-    }
-
-    private void installSnippets(Class< ? extends SnippetsInterface> clazz) {
-        for (Method method : clazz.getDeclaredMethods()) {
-            if (method.getAnnotation(Snippet.class) != null) {
-                int modifiers = method.getModifiers();
-                if (Modifier.isAbstract(modifiers) || Modifier.isNative(modifiers)) {
-                    throw new RuntimeException("Snippet must not be abstract or native");
-                }
-                ResolvedJavaMethod snippet = runtime.lookupJavaMethod(method);
-                assert snippet.getCompilerStorage().get(Graph.class) == null;
-                StructuredGraph graph = makeGraph(snippet, inliningPolicy(snippet));
-                //System.out.println("snippet: " + graph);
-                snippet.getCompilerStorage().put(Graph.class, graph);
-            }
-        }
-    }
-
-    private void installSubstitutions(Class< ? extends SnippetsInterface> clazz, Class<?> originalClazz) {
-        for (Method method : clazz.getDeclaredMethods()) {
-            if (method.getAnnotation(NodeIntrinsic.class) != null) {
-                continue;
-            }
-            try {
-                Method originalMethod = originalClazz.getDeclaredMethod(method.getName(), method.getParameterTypes());
-                if (!originalMethod.getReturnType().isAssignableFrom(method.getReturnType())) {
-                    throw new RuntimeException("Snippet has incompatible return type");
-                }
-                int modifiers = method.getModifiers();
-                if (Modifier.isAbstract(modifiers) || Modifier.isNative(modifiers)) {
-                    throw new RuntimeException("Snippet must not be abstract or native");
-                }
-                ResolvedJavaMethod snippet = runtime.lookupJavaMethod(method);
-                StructuredGraph graph = makeGraph(snippet, inliningPolicy(snippet));
-                //System.out.println("snippet: " + graph);
-                runtime.lookupJavaMethod(originalMethod).getCompilerStorage().put(Graph.class, graph);
-            } catch (NoSuchMethodException e) {
-                throw new GraalInternalError("Could not resolve method in " + originalClazz + " to substitute with " + method, e);
-            }
-        }
-    }
-
-    private static InliningPolicy inliningPolicy(ResolvedJavaMethod method) {
-        Class<? extends InliningPolicy> policyClass = InliningPolicy.class;
-        Snippet snippet = method.getAnnotation(Snippet.class);
-        if (snippet != null) {
-            policyClass = snippet.inlining();
-        }
-        if (policyClass == InliningPolicy.class) {
-            return InliningPolicy.Default;
-        }
-        try {
-            return policyClass.getConstructor().newInstance();
-        } catch (Exception e) {
-            throw new GraalInternalError(e);
-        }
-    }
-
-    public StructuredGraph makeGraph(final ResolvedJavaMethod method, final InliningPolicy policy) {
-        StructuredGraph graph = parseGraph(method, policy);
-
-        new SnippetIntrinsificationPhase(runtime, pool, SnippetTemplate.hasConstantParameter(method)).apply(graph);
-
-        Debug.dump(graph, "%s: Final", method.getName());
-
-        return graph;
-    }
-
-    private StructuredGraph parseGraph(final ResolvedJavaMethod method, final InliningPolicy policy) {
-        StructuredGraph graph = graphCache.get(method);
-        if (graph == null) {
-            graph = buildGraph(method, policy == null ? inliningPolicy(method) : policy);
-            //System.out.println("built " + graph);
-            graphCache.put(method, graph);
-        }
-        return graph;
-    }
-
-    private StructuredGraph buildGraph(final ResolvedJavaMethod method, final InliningPolicy policy) {
-        final StructuredGraph graph = new StructuredGraph(method);
-        return Debug.scope("BuildSnippetGraph", new Object[] {method, graph}, new Callable<StructuredGraph>() {
-            @Override
-            public StructuredGraph call() throws Exception {
-                GraphBuilderConfiguration config = GraphBuilderConfiguration.getSnippetDefault();
-                GraphBuilderPhase graphBuilder = new GraphBuilderPhase(runtime, config, OptimisticOptimizations.NONE);
-                graphBuilder.apply(graph);
-
-                Debug.dump(graph, "%s: %s", method.getName(), GraphBuilderPhase.class.getSimpleName());
-
-                new SnippetVerificationPhase().apply(graph);
-
-                new SnippetIntrinsificationPhase(runtime, pool, true).apply(graph);
-
-                for (Invoke invoke : graph.getInvokes()) {
-                    MethodCallTargetNode callTarget = invoke.methodCallTarget();
-                    ResolvedJavaMethod callee = callTarget.targetMethod();
-                    if (policy.shouldInline(callee, method)) {
-                        StructuredGraph targetGraph = parseGraph(callee, policy);
-                        InliningUtil.inline(invoke, targetGraph, true);
-                        Debug.dump(graph, "after inlining %s", callee);
-                        if (GraalOptions.OptCanonicalizer) {
-                            new WordTypeRewriterPhase(target.wordKind).apply(graph);
-                            new CanonicalizerPhase(target, runtime, null).apply(graph);
-                        }
-                    }
-                }
-
-                new SnippetIntrinsificationPhase(runtime, pool, true).apply(graph);
-
-                new WordTypeRewriterPhase(target.wordKind).apply(graph);
-
-                new DeadCodeEliminationPhase().apply(graph);
-                if (GraalOptions.OptCanonicalizer) {
-                    new CanonicalizerPhase(target, runtime, null).apply(graph);
-                }
-
-                for (LoopEndNode end : graph.getNodes(LoopEndNode.class)) {
-                    end.disableSafepoint();
-                }
-
-                new InsertStateAfterPlaceholderPhase().apply(graph);
-
-                if (GraalOptions.ProbabilityAnalysis) {
-                    new DeadCodeEliminationPhase().apply(graph);
-                    new ComputeProbabilityPhase().apply(graph);
-                }
-                return graph;
-            }
-        });
-    }
-}
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.snippets;
+
+import java.lang.reflect.*;
+import java.util.*;
+import java.util.concurrent.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.Node.NodeIntrinsic;
+import com.oracle.graal.java.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.extended.*;
+import com.oracle.graal.nodes.java.*;
+import com.oracle.graal.phases.*;
+import com.oracle.graal.phases.common.*;
+import com.oracle.graal.snippets.Snippet.SnippetInliningPolicy;
+
+/**
+ * Utility for snippet {@linkplain #install(Class) installation}.
+ */
+public class SnippetInstaller {
+
+    private final MetaAccessProvider runtime;
+    private final TargetDescription target;
+    private final Assumptions assumptions;
+    private final BoxingMethodPool pool;
+
+    /**
+     * A graph cache used by this installer to avoid using the compiler
+     * storage for each method processed during snippet installation.
+     * Without this cache, all processed methods would be determined to be
+     * {@linkplain InliningUtil#canIntrinsify intrinsifiable}.
+     */
+    private final Map<ResolvedJavaMethod, StructuredGraph> graphCache;
+
+    public SnippetInstaller(MetaAccessProvider runtime, Assumptions assumptions, TargetDescription target) {
+        this.runtime = runtime;
+        this.target = target;
+        this.assumptions = assumptions;
+        this.pool = new BoxingMethodPool(runtime);
+        this.graphCache = new HashMap<>();
+    }
+
+    /**
+     * Finds all the snippet methods in a given class, builds a graph for each of
+     * them, and installs that graph under the key {@code Graph.class} in the
+     * {@linkplain ResolvedJavaMethod#getCompilerStorage() compiler storage} of its method.
+     * <p>
+     * If {@code snippetsHolder} is annotated with {@link ClassSubstitution}, then all
+     * methods in the class are snippets. Otherwise, the snippets are those methods
+     * annotated with {@link Snippet}.
+     */
+    public void install(Class<? extends SnippetsInterface> snippetsHolder) {
+        if (snippetsHolder.isAnnotationPresent(ClassSubstitution.class)) {
+            installSubstitutions(snippetsHolder, snippetsHolder.getAnnotation(ClassSubstitution.class).value());
+        } else {
+            installSnippets(snippetsHolder);
+        }
+    }
+
+    private void installSnippets(Class< ? extends SnippetsInterface> clazz) {
+        for (Method method : clazz.getDeclaredMethods()) {
+            if (method.getAnnotation(Snippet.class) != null) {
+                int modifiers = method.getModifiers();
+                if (Modifier.isAbstract(modifiers) || Modifier.isNative(modifiers)) {
+                    throw new RuntimeException("Snippet must not be abstract or native");
+                }
+                ResolvedJavaMethod snippet = runtime.lookupJavaMethod(method);
+                assert snippet.getCompilerStorage().get(Graph.class) == null;
+                StructuredGraph graph = makeGraph(snippet, inliningPolicy(snippet));
+                //System.out.println("snippet: " + graph);
+                snippet.getCompilerStorage().put(Graph.class, graph);
+            }
+        }
+    }
+
+    private void installSubstitutions(Class< ? extends SnippetsInterface> clazz, Class<?> originalClazz) {
+        for (Method method : clazz.getDeclaredMethods()) {
+            if (method.getAnnotation(NodeIntrinsic.class) != null) {
+                continue;
+            }
+            try {
+                Method originalMethod = originalClazz.getDeclaredMethod(method.getName(), method.getParameterTypes());
+                if (!originalMethod.getReturnType().isAssignableFrom(method.getReturnType())) {
+                    throw new RuntimeException("Snippet has incompatible return type");
+                }
+                int modifiers = method.getModifiers();
+                if (Modifier.isAbstract(modifiers) || Modifier.isNative(modifiers)) {
+                    throw new RuntimeException("Snippet must not be abstract or native");
+                }
+                ResolvedJavaMethod snippet = runtime.lookupJavaMethod(method);
+                StructuredGraph graph = makeGraph(snippet, inliningPolicy(snippet));
+                //System.out.println("snippet: " + graph);
+                runtime.lookupJavaMethod(originalMethod).getCompilerStorage().put(Graph.class, graph);
+            } catch (NoSuchMethodException e) {
+                throw new GraalInternalError("Could not resolve method in " + originalClazz + " to substitute with " + method, e);
+            }
+        }
+    }
+
+    private static SnippetInliningPolicy inliningPolicy(ResolvedJavaMethod method) {
+        Class<? extends SnippetInliningPolicy> policyClass = SnippetInliningPolicy.class;
+        Snippet snippet = method.getAnnotation(Snippet.class);
+        if (snippet != null) {
+            policyClass = snippet.inlining();
+        }
+        if (policyClass == SnippetInliningPolicy.class) {
+            return SnippetInliningPolicy.Default;
+        }
+        try {
+            return policyClass.getConstructor().newInstance();
+        } catch (Exception e) {
+            throw new GraalInternalError(e);
+        }
+    }
+
+    public StructuredGraph makeGraph(final ResolvedJavaMethod method, final SnippetInliningPolicy policy) {
+        StructuredGraph graph = parseGraph(method, policy);
+
+        new SnippetIntrinsificationPhase(runtime, pool, SnippetTemplate.hasConstantParameter(method)).apply(graph);
+
+        Debug.dump(graph, "%s: Final", method.getName());
+
+        return graph;
+    }
+
+    private StructuredGraph parseGraph(final ResolvedJavaMethod method, final SnippetInliningPolicy policy) {
+        StructuredGraph graph = graphCache.get(method);
+        if (graph == null) {
+            graph = buildGraph(method, policy == null ? inliningPolicy(method) : policy);
+            //System.out.println("built " + graph);
+            graphCache.put(method, graph);
+        }
+        return graph;
+    }
+
+    private StructuredGraph buildGraph(final ResolvedJavaMethod method, final SnippetInliningPolicy policy) {
+        final StructuredGraph graph = new StructuredGraph(method);
+        return Debug.scope("BuildSnippetGraph", new Object[] {method, graph}, new Callable<StructuredGraph>() {
+            @Override
+            public StructuredGraph call() throws Exception {
+                GraphBuilderConfiguration config = GraphBuilderConfiguration.getSnippetDefault();
+                GraphBuilderPhase graphBuilder = new GraphBuilderPhase(runtime, config, OptimisticOptimizations.NONE);
+                graphBuilder.apply(graph);
+
+                Debug.dump(graph, "%s: %s", method.getName(), GraphBuilderPhase.class.getSimpleName());
+
+                new SnippetVerificationPhase().apply(graph);
+
+                new SnippetIntrinsificationPhase(runtime, pool, true).apply(graph);
+
+                for (Invoke invoke : graph.getInvokes()) {
+                    MethodCallTargetNode callTarget = invoke.methodCallTarget();
+                    ResolvedJavaMethod callee = callTarget.targetMethod();
+                    if (policy.shouldInline(callee, method)) {
+                        StructuredGraph targetGraph = parseGraph(callee, policy);
+                        InliningUtil.inline(invoke, targetGraph, true);
+                        Debug.dump(graph, "after inlining %s", callee);
+                        if (GraalOptions.OptCanonicalizer) {
+                            new WordTypeRewriterPhase(target.wordKind).apply(graph);
+                            new CanonicalizerPhase(target, runtime, assumptions).apply(graph);
+                        }
+                    }
+                }
+
+                new SnippetIntrinsificationPhase(runtime, pool, true).apply(graph);
+
+                new WordTypeRewriterPhase(target.wordKind).apply(graph);
+
+                new DeadCodeEliminationPhase().apply(graph);
+                if (GraalOptions.OptCanonicalizer) {
+                    new CanonicalizerPhase(target, runtime, assumptions).apply(graph);
+                }
+
+                for (LoopEndNode end : graph.getNodes(LoopEndNode.class)) {
+                    end.disableSafepoint();
+                }
+
+                new InsertStateAfterPlaceholderPhase().apply(graph);
+
+                if (GraalOptions.ProbabilityAnalysis) {
+                    new DeadCodeEliminationPhase().apply(graph);
+                    new ComputeProbabilityPhase().apply(graph);
+                }
+                return graph;
+            }
+        });
+    }
+}
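
The installer now receives the Assumptions up front and hands them to every CanonicalizerPhase it runs while building snippet graphs, where it previously passed null. A minimal sketch of the new wiring, assuming runtime (MetaAccessProvider) and target (TargetDescription) are already in scope and MySnippets is a hypothetical SnippetsInterface implementation:

    // Assumptions(boolean) is the constructor introduced by this changeset;
    // true enables recording of optimistic assumptions.
    Assumptions assumptions = new Assumptions(true);
    SnippetInstaller installer = new SnippetInstaller(runtime, assumptions, target);
    installer.install(MySnippets.class);
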
--- a/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/SnippetTemplate.java	Mon Nov 26 19:30:54 2012 -0800
+++ b/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/SnippetTemplate.java	Tue Nov 27 12:12:02 2012 +0100
@@ -165,13 +165,13 @@
         /**
          * Gets a template for a given key, creating it first if necessary.
          */
-        public SnippetTemplate get(final SnippetTemplate.Key key) {
+        public SnippetTemplate get(final SnippetTemplate.Key key, final Assumptions assumptions) {
             SnippetTemplate template = templates.get(key);
             if (template == null) {
                 template = Debug.scope("SnippetSpecialization", key.method, new Callable<SnippetTemplate>() {
                     @Override
                     public SnippetTemplate call() throws Exception {
-                        return new SnippetTemplate(runtime, target, key);
+                        return new SnippetTemplate(runtime, assumptions, target, key);
                     }
                 });
                 //System.out.println(key + " -> " + template);
@@ -184,9 +184,12 @@
     public abstract static class AbstractTemplates<T extends SnippetsInterface> {
         protected final Cache cache;
         protected final MetaAccessProvider runtime;
+        protected final Assumptions assumptions;
         protected Class<T> snippetsClass;
-        public AbstractTemplates(MetaAccessProvider runtime, TargetDescription target, Class<T> snippetsClass) {
+
+        public AbstractTemplates(MetaAccessProvider runtime, Assumptions assumptions, TargetDescription target, Class<T> snippetsClass) {
             this.runtime = runtime;
+            this.assumptions = assumptions;
             this.snippetsClass = snippetsClass;
             this.cache = new Cache(runtime, target);
         }
@@ -217,7 +220,7 @@
     /**
      * Creates a snippet template.
      */
-    public SnippetTemplate(MetaAccessProvider runtime, TargetDescription target, SnippetTemplate.Key key) {
+    public SnippetTemplate(MetaAccessProvider runtime, Assumptions assumptions, TargetDescription target, SnippetTemplate.Key key) {
         ResolvedJavaMethod method = key.method;
         assert Modifier.isStatic(method.getModifiers()) : "snippet method must be static: " + method;
         Signature signature = method.getSignature();
@@ -264,7 +267,7 @@
             new SnippetIntrinsificationPhase(runtime, new BoxingMethodPool(runtime), false).apply(snippetCopy);
             new WordTypeRewriterPhase(target.wordKind).apply(snippetCopy);
 
-            new CanonicalizerPhase(null, runtime, null, 0, null).apply(snippetCopy);
+            new CanonicalizerPhase(null, runtime, assumptions, 0, null).apply(snippetCopy);
         }
 
         // Gather the template parameters
@@ -317,8 +320,8 @@
                 if (loopBegin != null) {
                     LoopEx loop = new LoopsData(snippetCopy).loop(loopBegin);
                     int mark = snippetCopy.getMark();
-                    LoopTransformations.fullUnroll(loop, runtime);
-                    new CanonicalizerPhase(null, runtime, null, mark, null).apply(snippetCopy);
+                    LoopTransformations.fullUnroll(loop, runtime, null);
+                    new CanonicalizerPhase(null, runtime, assumptions, mark, null).apply(snippetCopy);
                 }
                 FixedNode explodeLoopNext = explodeLoop.next();
                 explodeLoop.clearSuccessors();
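
SnippetTemplate creation is the other end of the same plumbing: Cache.get now takes the assumptions and forwards them to the SnippetTemplate constructor, which uses them for canonicalization during specialization and loop explosion. A minimal sketch of a lookup under the new signature, with key, node, replacer, lastFixedNode and arguments standing in as placeholders for whatever the caller has in scope:

    // Sketch only: cache and assumptions are fields of AbstractTemplates subclasses;
    // the remaining names are hypothetical caller state.
    SnippetTemplate template = cache.get(key, assumptions);
    template.instantiate(runtime, node, replacer, lastFixedNode, arguments);
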
--- a/src/cpu/x86/vm/c1_globals_x86.hpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/cpu/x86/vm/c1_globals_x86.hpp	Tue Nov 27 12:12:02 2012 +0100
@@ -52,8 +52,8 @@
 #ifdef GRAAL
 define_pd_global(bool, ProfileTraps,                 true );
 define_pd_global(bool, UseOnStackReplacement,        true);
-define_pd_global(intx, CompileThreshold,             2500 );
-define_pd_global(intx, InitialCodeCacheSize,         4*M  );
+define_pd_global(intx, CompileThreshold,             10000 );
+define_pd_global(intx, InitialCodeCacheSize,         16*M  );
 define_pd_global(intx, ReservedCodeCacheSize,        64*M );
 define_pd_global(bool, ProfileInterpreter,           true );
 define_pd_global(intx, CodeCacheExpansionSize,       64*K );
--- a/src/share/vm/c1/c1_globals.hpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/c1/c1_globals.hpp	Tue Nov 27 12:12:02 2012 +0100
@@ -60,6 +60,8 @@
           "Enable JVMTI for the compiler thread")                           \
   product(bool, BootstrapGraal, true,                                       \
           "Bootstrap graal before running Java main method")                \
+  product(ccstr, GraalClassPath, NULL,                                      \
+          "Use the defined graal class path instead of searching for the classes") \
   product(intx, TraceGraal, 0,                                              \
           "Trace level for graal")                                          \
   product(bool, TraceSignals, false,                                        \
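
The new GraalClassPath flag lets the VM load the Graal classes from an explicitly supplied class path instead of searching the default locations. As a ccstr product flag it would be set on the command line, e.g. -XX:GraalClassPath=/path/to/graal/classes (path hypothetical).
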
--- a/src/share/vm/code/codeBlob.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/code/codeBlob.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -1,581 +1,586 @@
-/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/codeBlob.hpp"
-#include "code/codeCache.hpp"
-#include "code/relocInfo.hpp"
-#include "compiler/disassembler.hpp"
-#include "interpreter/bytecode.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/heap.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/forte.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/vframe.hpp"
-#include "services/memoryService.hpp"
-#ifdef TARGET_ARCH_x86
-# include "nativeInst_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "nativeInst_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "nativeInst_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "nativeInst_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "nativeInst_ppc.hpp"
-#endif
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-
-unsigned int align_code_offset(int offset) {
-  // align the size to CodeEntryAlignment
-  return
-    ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
-    - (int)CodeHeap::header_size();
-}
-
-
-// This must be consistent with the CodeBlob constructor's layout actions.
-unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
-  unsigned int size = header_size;
-  size += round_to(cb->total_relocation_size(), oopSize);
-  // align the size to CodeEntryAlignment
-  size = align_code_offset(size);
-  size += round_to(cb->total_content_size(), oopSize);
-  size += round_to(cb->total_oop_size(), oopSize);
-  size += round_to(cb->total_metadata_size(), oopSize);
-  return size;
-}
-
-
-// Creates a simple CodeBlob. Sets up the size of the different regions.
-CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
-  assert(size        == round_to(size,        oopSize), "unaligned size");
-  assert(locs_size   == round_to(locs_size,   oopSize), "unaligned size");
-  assert(header_size == round_to(header_size, oopSize), "unaligned size");
-  assert(!UseRelocIndex, "no space allocated for reloc index yet");
-
-  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
-  //       extra word for the relocation information, containing the reloc
-  //       index table length. Unfortunately, the reloc index table imple-
-  //       mentation is not easily understandable and thus it is not clear
-  //       what exactly the format is supposed to be. For now, we just turn
-  //       off the use of this table (gri 7/6/2000).
-
-  _name                  = name;
-  _size                  = size;
-  _frame_complete_offset = frame_complete;
-  _header_size           = header_size;
-  _relocation_size       = locs_size;
-  _content_offset        = align_code_offset(header_size + _relocation_size);
-  _code_offset           = _content_offset;
-  _data_offset           = size;
-  _frame_size            =  0;
-  set_oop_maps(NULL);
-}
-
-
-// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
-// and copies the code and relocation info.
-CodeBlob::CodeBlob(
-  const char* name,
-  CodeBuffer* cb,
-  int         header_size,
-  int         size,
-  int         frame_complete,
-  int         frame_size,
-  OopMapSet*  oop_maps
-) {
-  assert(size        == round_to(size,        oopSize), "unaligned size");
-  assert(header_size == round_to(header_size, oopSize), "unaligned size");
-
-  _name                  = name;
-  _size                  = size;
-  _frame_complete_offset = frame_complete;
-  _header_size           = header_size;
-  _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
-  _content_offset        = align_code_offset(header_size + _relocation_size);
-  _code_offset           = _content_offset + cb->total_offset_of(cb->insts());
-  _data_offset           = _content_offset + round_to(cb->total_content_size(), oopSize);
-  assert(_data_offset <= size, "codeBlob is too small");
-
-  cb->copy_code_and_locs_to(this);
-  set_oop_maps(oop_maps);
-  _frame_size = frame_size;
-#ifdef COMPILER1
-  // probably wrong for tiered
-  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
-#endif // COMPILER1
-}
-
-
-void CodeBlob::set_oop_maps(OopMapSet* p) {
-  // Danger Will Robinson! This method allocates a big
-  // chunk of memory; it's your job to free it.
-  if (p != NULL) {
-    // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps
-    _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode);
-    p->copy_to((address)_oop_maps);
-  } else {
-    _oop_maps = NULL;
-  }
-}
-
-
-void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
-  // Do not hold the CodeCache lock during name formatting.
-  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
-
-  if (stub != NULL) {
-    char stub_id[256];
-    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
-    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
-    if (PrintStubCode) {
-      ttyLocker ttyl;
-      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
-      Disassembler::decode(stub->code_begin(), stub->code_end());
-      tty->cr();
-    }
-    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
-
-    if (JvmtiExport::should_post_dynamic_code_generated()) {
-      const char* stub_name = name2;
-      if (name2[0] == '\0')  stub_name = name1;
-      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
-    }
-  }
-
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
-}
-
-
-void CodeBlob::flush() {
-  if (_oop_maps) {
-    FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode);
-    _oop_maps = NULL;
-  }
-  _comments.free();
-}
-
-
-OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
-  assert(oop_maps() != NULL, "nope");
-  return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
-}
-
-
-//----------------------------------------------------------------------------------------------------
-// Implementation of BufferBlob
-
-
-BufferBlob::BufferBlob(const char* name, int size)
-: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
-{}
-
-BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-
-  BufferBlob* blob = NULL;
-  unsigned int size = sizeof(BufferBlob);
-  // align the size to CodeEntryAlignment
-  size = align_code_offset(size);
-  size += round_to(buffer_size, oopSize);
-  assert(name != NULL, "must provide a name");
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    blob = new (size) BufferBlob(name, size);
-  }
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
-
-  return blob;
-}
-
-
-BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
-  : CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
-{}
-
-BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-
-  BufferBlob* blob = NULL;
-  unsigned int size = allocation_size(cb, sizeof(BufferBlob));
-  assert(name != NULL, "must provide a name");
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    blob = new (size) BufferBlob(name, size, cb);
-  }
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
-
-  return blob;
-}
-
-
-void* BufferBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
-  return p;
-}
-
-
-void BufferBlob::free( BufferBlob *blob ) {
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    CodeCache::free((CodeBlob*)blob);
-  }
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
-}
-
-
-//----------------------------------------------------------------------------------------------------
-// Implementation of AdapterBlob
-
-AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
-  BufferBlob("I2C/C2I adapters", size, cb) {
-  CodeCache::commit(this);
-}
-
-AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-
-  AdapterBlob* blob = NULL;
-  unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    blob = new (size) AdapterBlob(size, cb);
-  }
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
-
-  return blob;
-}
-
-
-//----------------------------------------------------------------------------------------------------
-// Implementation of MethodHandlesAdapterBlob
-
-MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-
-  MethodHandlesAdapterBlob* blob = NULL;
-  unsigned int size = sizeof(MethodHandlesAdapterBlob);
-  // align the size to CodeEntryAlignment
-  size = align_code_offset(size);
-  size += round_to(buffer_size, oopSize);
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    blob = new (size) MethodHandlesAdapterBlob(size);
-  }
-  // Track memory usage statistic after releasing CodeCache_lock
-  MemoryService::track_code_cache_memory_usage();
-
-  return blob;
-}
-
-
-//----------------------------------------------------------------------------------------------------
-// Implementation of RuntimeStub
-
-RuntimeStub::RuntimeStub(
-  const char* name,
-  CodeBuffer* cb,
-  int         size,
-  int         frame_complete,
-  int         frame_size,
-  OopMapSet*  oop_maps,
-  bool        caller_must_gc_arguments
-)
-: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps)
-{
-  _caller_must_gc_arguments = caller_must_gc_arguments;
-}
-
-
-RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
-                                           CodeBuffer* cb,
-                                           int frame_complete,
-                                           int frame_size,
-                                           OopMapSet* oop_maps,
-                                           bool caller_must_gc_arguments)
-{
-  RuntimeStub* stub = NULL;
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    unsigned int size = allocation_size(cb, sizeof(RuntimeStub));
-    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
-  }
-
-  trace_new_stub(stub, "RuntimeStub - ", stub_name);
-
-  return stub;
-}
-
-
-void* RuntimeStub::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
-  if (!p) fatal("Initial size of CodeCache is too small");
-  return p;
-}
-
-// operator new shared by all singletons:
-void* SingletonBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
-  if (!p) fatal("Initial size of CodeCache is too small");
-  return p;
-}
-
-
-//----------------------------------------------------------------------------------------------------
-// Implementation of DeoptimizationBlob
-
-DeoptimizationBlob::DeoptimizationBlob(
-  CodeBuffer* cb,
-  int         size,
-  OopMapSet*  oop_maps,
-  int         unpack_offset,
-  int         unpack_with_exception_offset,
-  int         unpack_with_reexecution_offset,
-  int         frame_size
-)
-: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
-{
-  _unpack_offset           = unpack_offset;
-  _unpack_with_exception   = unpack_with_exception_offset;
-  _unpack_with_reexecution = unpack_with_reexecution_offset;
-#ifdef COMPILER1
-  _unpack_with_exception_in_tls   = -1;
-#endif
-}
-
-
-DeoptimizationBlob* DeoptimizationBlob::create(
-  CodeBuffer* cb,
-  OopMapSet*  oop_maps,
-  int        unpack_offset,
-  int        unpack_with_exception_offset,
-  int        unpack_with_reexecution_offset,
-  int        frame_size)
-{
-  DeoptimizationBlob* blob = NULL;
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob));
-    blob = new (size) DeoptimizationBlob(cb,
-                                         size,
-                                         oop_maps,
-                                         unpack_offset,
-                                         unpack_with_exception_offset,
-                                         unpack_with_reexecution_offset,
-                                         frame_size);
-  }
-
-  trace_new_stub(blob, "DeoptimizationBlob");
-
-  return blob;
-}
-
-
-//----------------------------------------------------------------------------------------------------
-// Implementation of UncommonTrapBlob
-
-#ifdef COMPILER2
-UncommonTrapBlob::UncommonTrapBlob(
-  CodeBuffer* cb,
-  int         size,
-  OopMapSet*  oop_maps,
-  int         frame_size
-)
-: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
-{}
-
-
-UncommonTrapBlob* UncommonTrapBlob::create(
-  CodeBuffer* cb,
-  OopMapSet*  oop_maps,
-  int        frame_size)
-{
-  UncommonTrapBlob* blob = NULL;
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob));
-    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
-  }
-
-  trace_new_stub(blob, "UncommonTrapBlob");
-
-  return blob;
-}
-
-
-#endif // COMPILER2
-
-
-//----------------------------------------------------------------------------------------------------
-// Implementation of ExceptionBlob
-
-#ifdef COMPILER2
-ExceptionBlob::ExceptionBlob(
-  CodeBuffer* cb,
-  int         size,
-  OopMapSet*  oop_maps,
-  int         frame_size
-)
-: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
-{}
-
-
-ExceptionBlob* ExceptionBlob::create(
-  CodeBuffer* cb,
-  OopMapSet*  oop_maps,
-  int         frame_size)
-{
-  ExceptionBlob* blob = NULL;
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    unsigned int size = allocation_size(cb, sizeof(ExceptionBlob));
-    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
-  }
-
-  trace_new_stub(blob, "ExceptionBlob");
-
-  return blob;
-}
-
-
-#endif // COMPILER2
-
-
-//----------------------------------------------------------------------------------------------------
-// Implementation of SafepointBlob
-
-SafepointBlob::SafepointBlob(
-  CodeBuffer* cb,
-  int         size,
-  OopMapSet*  oop_maps,
-  int         frame_size
-)
-: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
-{}
-
-
-SafepointBlob* SafepointBlob::create(
-  CodeBuffer* cb,
-  OopMapSet*  oop_maps,
-  int         frame_size)
-{
-  SafepointBlob* blob = NULL;
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    unsigned int size = allocation_size(cb, sizeof(SafepointBlob));
-    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
-  }
-
-  trace_new_stub(blob, "SafepointBlob");
-
-  return blob;
-}
-
-
-//----------------------------------------------------------------------------------------------------
-// Verification and printing
-
-void CodeBlob::verify() {
-  ShouldNotReachHere();
-}
-
-void CodeBlob::print_on(outputStream* st) const {
-  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", this);
-  st->print_cr("Framesize: %d", _frame_size);
-}
-
-void CodeBlob::print_value_on(outputStream* st) const {
-  st->print_cr("[CodeBlob]");
-}
-
-void BufferBlob::verify() {
-  // unimplemented
-}
-
-void BufferBlob::print_on(outputStream* st) const {
-  CodeBlob::print_on(st);
-  print_value_on(st);
-}
-
-void BufferBlob::print_value_on(outputStream* st) const {
-  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", this, name());
-}
-
-void RuntimeStub::verify() {
-  // unimplemented
-}
-
-void RuntimeStub::print_on(outputStream* st) const {
-  ttyLocker ttyl;
-  CodeBlob::print_on(st);
-  st->print("Runtime Stub (" INTPTR_FORMAT "): ", this);
-  st->print_cr(name());
-  Disassembler::decode((CodeBlob*)this, st);
-}
-
-void RuntimeStub::print_value_on(outputStream* st) const {
-  st->print("RuntimeStub (" INTPTR_FORMAT "): ", this); st->print(name());
-}
-
-void SingletonBlob::verify() {
-  // unimplemented
-}
-
-void SingletonBlob::print_on(outputStream* st) const {
-  ttyLocker ttyl;
-  CodeBlob::print_on(st);
-  st->print_cr(name());
-  Disassembler::decode((CodeBlob*)this, st);
-}
-
-void SingletonBlob::print_value_on(outputStream* st) const {
-  st->print_cr(name());
-}
-
-void DeoptimizationBlob::print_value_on(outputStream* st) const {
-  st->print_cr("Deoptimization (frame not available)");
-}
+/*
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeBlob.hpp"
+#include "code/codeCache.hpp"
+#include "code/relocInfo.hpp"
+#include "compiler/disassembler.hpp"
+#include "interpreter/bytecode.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/heap.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/forte.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/vframe.hpp"
+#include "services/memoryService.hpp"
+#include "utilities/machineCodePrinter.hpp"
+#ifdef TARGET_ARCH_x86
+# include "nativeInst_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "nativeInst_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "nativeInst_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "nativeInst_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "nativeInst_ppc.hpp"
+#endif
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+
+unsigned int align_code_offset(int offset) {
+  // align the size to CodeEntryAlignment
+  return
+    ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
+    - (int)CodeHeap::header_size();
+}
+
+
+// This must be consistent with the CodeBlob constructor's layout actions.
+unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
+  unsigned int size = header_size;
+  size += round_to(cb->total_relocation_size(), oopSize);
+  // align the size to CodeEntryAlignment
+  size = align_code_offset(size);
+  size += round_to(cb->total_content_size(), oopSize);
+  size += round_to(cb->total_oop_size(), oopSize);
+  size += round_to(cb->total_metadata_size(), oopSize);
+  return size;
+}
+
+
+// Creates a simple CodeBlob. Sets up the size of the different regions.
+CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
+  assert(size        == round_to(size,        oopSize), "unaligned size");
+  assert(locs_size   == round_to(locs_size,   oopSize), "unaligned size");
+  assert(header_size == round_to(header_size, oopSize), "unaligned size");
+  assert(!UseRelocIndex, "no space allocated for reloc index yet");
+
+  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
+  //       extra word for the relocation information, containing the reloc
+  //       index table length. Unfortunately, the reloc index table imple-
+  //       mentation is not easily understandable and thus it is not clear
+  //       what exactly the format is supposed to be. For now, we just turn
+  //       off the use of this table (gri 7/6/2000).
+
+  _name                  = name;
+  _size                  = size;
+  _frame_complete_offset = frame_complete;
+  _header_size           = header_size;
+  _relocation_size       = locs_size;
+  _content_offset        = align_code_offset(header_size + _relocation_size);
+  _code_offset           = _content_offset;
+  _data_offset           = size;
+  _frame_size            =  0;
+  set_oop_maps(NULL);
+}
+
+
+// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
+// and copies the code and relocation info.
+CodeBlob::CodeBlob(
+  const char* name,
+  CodeBuffer* cb,
+  int         header_size,
+  int         size,
+  int         frame_complete,
+  int         frame_size,
+  OopMapSet*  oop_maps
+) {
+  assert(size        == round_to(size,        oopSize), "unaligned size");
+  assert(header_size == round_to(header_size, oopSize), "unaligned size");
+
+  _name                  = name;
+  _size                  = size;
+  _frame_complete_offset = frame_complete;
+  _header_size           = header_size;
+  _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
+  _content_offset        = align_code_offset(header_size + _relocation_size);
+  _code_offset           = _content_offset + cb->total_offset_of(cb->insts());
+  _data_offset           = _content_offset + round_to(cb->total_content_size(), oopSize);
+  assert(_data_offset <= size, "codeBlob is too small");
+
+  cb->copy_code_and_locs_to(this);
+  set_oop_maps(oop_maps);
+  _frame_size = frame_size;
+#ifdef COMPILER1
+  // probably wrong for tiered
+  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
+#endif // COMPILER1
+}
+
+
+void CodeBlob::set_oop_maps(OopMapSet* p) {
+  // Danger Will Robinson! This method allocates a big
+  // chunk of memory; it's your job to free it.
+  if (p != NULL) {
+    // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps
+    _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode);
+    p->copy_to((address)_oop_maps);
+  } else {
+    _oop_maps = NULL;
+  }
+}
+
+
+void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
+  // Do not hold the CodeCache lock during name formatting.
+  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
+
+  if (stub != NULL) {
+    char stub_id[256];
+    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
+    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
+    if (PrintStubCode) {
+      ttyLocker ttyl;
+      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
+      Disassembler::decode(stub->code_begin(), stub->code_end());
+      tty->cr();
+    }
+    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
+
+    if (JvmtiExport::should_post_dynamic_code_generated()) {
+      const char* stub_name = name2;
+      if (name2[0] == '\0')  stub_name = name1;
+      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
+    }
+  }
+
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+}
+
+
+void CodeBlob::flush() {
+  if (_oop_maps) {
+    FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode);
+    _oop_maps = NULL;
+  }
+  _comments.free();
+}
+
+
+OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
+  assert(oop_maps() != NULL, "nope");
+  return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
+}
+
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of BufferBlob
+
+
+BufferBlob::BufferBlob(const char* name, int size)
+: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
+{}
+
+BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  BufferBlob* blob = NULL;
+  unsigned int size = sizeof(BufferBlob);
+  // align the size to CodeEntryAlignment
+  size = align_code_offset(size);
+  size += round_to(buffer_size, oopSize);
+  assert(name != NULL, "must provide a name");
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) BufferBlob(name, size);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}
+
+
+BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
+  : CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
+{}
+
+BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  BufferBlob* blob = NULL;
+  unsigned int size = allocation_size(cb, sizeof(BufferBlob));
+  assert(name != NULL, "must provide a name");
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) BufferBlob(name, size, cb);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}
+
+
+void* BufferBlob::operator new(size_t s, unsigned size) {
+  void* p = CodeCache::allocate(size);
+  return p;
+}
+
+
+void BufferBlob::free( BufferBlob *blob ) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    CodeCache::free((CodeBlob*)blob);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+}
+
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of AdapterBlob
+
+AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
+  BufferBlob("I2C/C2I adapters", size, cb) {
+  CodeCache::commit(this);
+}
+
+AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  AdapterBlob* blob = NULL;
+  unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) AdapterBlob(size, cb);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}
+
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of MethodHandlesAdapterBlob
+
+MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  MethodHandlesAdapterBlob* blob = NULL;
+  unsigned int size = sizeof(MethodHandlesAdapterBlob);
+  // align the size to CodeEntryAlignment
+  size = align_code_offset(size);
+  size += round_to(buffer_size, oopSize);
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) MethodHandlesAdapterBlob(size);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}
+
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of RuntimeStub
+
+RuntimeStub::RuntimeStub(
+  const char* name,
+  CodeBuffer* cb,
+  int         size,
+  int         frame_complete,
+  int         frame_size,
+  OopMapSet*  oop_maps,
+  bool        caller_must_gc_arguments
+)
+: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps)
+{
+  _caller_must_gc_arguments = caller_must_gc_arguments;
+}
+
+
+RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
+                                           CodeBuffer* cb,
+                                           int frame_complete,
+                                           int frame_size,
+                                           OopMapSet* oop_maps,
+                                           bool caller_must_gc_arguments)
+{
+  RuntimeStub* stub = NULL;
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    unsigned int size = allocation_size(cb, sizeof(RuntimeStub));
+    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
+  }
+
+  trace_new_stub(stub, "RuntimeStub - ", stub_name);
+
+  if (PrintMachineCodeToFile) {
+    MachineCodePrinter::print(stub);
+  }
+
+  return stub;
+}
+
+
+void* RuntimeStub::operator new(size_t s, unsigned size) {
+  void* p = CodeCache::allocate(size);
+  if (!p) fatal("Initial size of CodeCache is too small");
+  return p;
+}
+
+// operator new shared by all singletons:
+void* SingletonBlob::operator new(size_t s, unsigned size) {
+  void* p = CodeCache::allocate(size);
+  if (!p) fatal("Initial size of CodeCache is too small");
+  return p;
+}
+
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of DeoptimizationBlob
+
+DeoptimizationBlob::DeoptimizationBlob(
+  CodeBuffer* cb,
+  int         size,
+  OopMapSet*  oop_maps,
+  int         unpack_offset,
+  int         unpack_with_exception_offset,
+  int         unpack_with_reexecution_offset,
+  int         frame_size
+)
+: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
+{
+  _unpack_offset           = unpack_offset;
+  _unpack_with_exception   = unpack_with_exception_offset;
+  _unpack_with_reexecution = unpack_with_reexecution_offset;
+#ifdef COMPILER1
+  _unpack_with_exception_in_tls   = -1;
+#endif
+}
+
+
+DeoptimizationBlob* DeoptimizationBlob::create(
+  CodeBuffer* cb,
+  OopMapSet*  oop_maps,
+  int        unpack_offset,
+  int        unpack_with_exception_offset,
+  int        unpack_with_reexecution_offset,
+  int        frame_size)
+{
+  DeoptimizationBlob* blob = NULL;
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob));
+    blob = new (size) DeoptimizationBlob(cb,
+                                         size,
+                                         oop_maps,
+                                         unpack_offset,
+                                         unpack_with_exception_offset,
+                                         unpack_with_reexecution_offset,
+                                         frame_size);
+  }
+
+  trace_new_stub(blob, "DeoptimizationBlob");
+
+  return blob;
+}
+
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of UncommonTrapBlob
+
+#ifdef COMPILER2
+UncommonTrapBlob::UncommonTrapBlob(
+  CodeBuffer* cb,
+  int         size,
+  OopMapSet*  oop_maps,
+  int         frame_size
+)
+: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
+{}
+
+
+UncommonTrapBlob* UncommonTrapBlob::create(
+  CodeBuffer* cb,
+  OopMapSet*  oop_maps,
+  int        frame_size)
+{
+  UncommonTrapBlob* blob = NULL;
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob));
+    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
+  }
+
+  trace_new_stub(blob, "UncommonTrapBlob");
+
+  return blob;
+}
+
+
+#endif // COMPILER2
+
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of ExceptionBlob
+
+#ifdef COMPILER2
+ExceptionBlob::ExceptionBlob(
+  CodeBuffer* cb,
+  int         size,
+  OopMapSet*  oop_maps,
+  int         frame_size
+)
+: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
+{}
+
+
+ExceptionBlob* ExceptionBlob::create(
+  CodeBuffer* cb,
+  OopMapSet*  oop_maps,
+  int         frame_size)
+{
+  ExceptionBlob* blob = NULL;
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    unsigned int size = allocation_size(cb, sizeof(ExceptionBlob));
+    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
+  }
+
+  trace_new_stub(blob, "ExceptionBlob");
+
+  return blob;
+}
+
+
+#endif // COMPILER2
+
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of SafepointBlob
+
+SafepointBlob::SafepointBlob(
+  CodeBuffer* cb,
+  int         size,
+  OopMapSet*  oop_maps,
+  int         frame_size
+)
+: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
+{}
+
+
+SafepointBlob* SafepointBlob::create(
+  CodeBuffer* cb,
+  OopMapSet*  oop_maps,
+  int         frame_size)
+{
+  SafepointBlob* blob = NULL;
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    unsigned int size = allocation_size(cb, sizeof(SafepointBlob));
+    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
+  }
+
+  trace_new_stub(blob, "SafepointBlob");
+
+  return blob;
+}
+
+
+//----------------------------------------------------------------------------------------------------
+// Verification and printing
+
+void CodeBlob::verify() {
+  ShouldNotReachHere();
+}
+
+void CodeBlob::print_on(outputStream* st) const {
+  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", this);
+  st->print_cr("Framesize: %d", _frame_size);
+}
+
+void CodeBlob::print_value_on(outputStream* st) const {
+  st->print_cr("[CodeBlob]");
+}
+
+void BufferBlob::verify() {
+  // unimplemented
+}
+
+void BufferBlob::print_on(outputStream* st) const {
+  CodeBlob::print_on(st);
+  print_value_on(st);
+}
+
+void BufferBlob::print_value_on(outputStream* st) const {
+  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", this, name());
+}
+
+void RuntimeStub::verify() {
+  // unimplemented
+}
+
+void RuntimeStub::print_on(outputStream* st) const {
+  ttyLocker ttyl;
+  CodeBlob::print_on(st);
+  st->print("Runtime Stub (" INTPTR_FORMAT "): ", this);
+  st->print_cr("%s", name());  // explicit format string; name() may contain '%'
+  Disassembler::decode((CodeBlob*)this, st);
+}
+
+void RuntimeStub::print_value_on(outputStream* st) const {
+  st->print("RuntimeStub (" INTPTR_FORMAT "): ", this); st->print("%s", name());
+}
+
+void SingletonBlob::verify() {
+  // unimplemented
+}
+
+void SingletonBlob::print_on(outputStream* st) const {
+  ttyLocker ttyl;
+  CodeBlob::print_on(st);
+  st->print_cr("%s", name());
+  Disassembler::decode((CodeBlob*)this, st);
+}
+
+void SingletonBlob::print_value_on(outputStream* st) const {
+  st->print_cr("%s", name());
+}
+
+void DeoptimizationBlob::print_value_on(outputStream* st) const {
+  st->print_cr("Deoptimization (frame not available)");
+}
--- a/src/share/vm/code/icBuffer.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/code/icBuffer.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -110,8 +110,8 @@
 void ICStub::verify() {
 }
 
-void ICStub::print() {
-  tty->print_cr("ICStub: site: " INTPTR_FORMAT, _ic_site);
+void ICStub::print_on(outputStream* st) {
+  st->print_cr("ICStub: site: " INTPTR_FORMAT, _ic_site);
 }
 #endif
 
--- a/src/share/vm/code/icBuffer.hpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/code/icBuffer.hpp	Tue Nov 27 12:12:02 2012 +0100
@@ -75,8 +75,8 @@
   void* cached_value() const;   // cached_value for stub
 
   // Debugging
-  void    verify()            PRODUCT_RETURN;
-  void    print()             PRODUCT_RETURN;
+  void    verify()                   PRODUCT_RETURN;
+  void    print_on(outputStream* st) PRODUCT_RETURN;
 
   // Creation
   friend ICStub* ICStub_from_destination_address(address destination_address);
--- a/src/share/vm/code/nmethod.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/code/nmethod.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -1,3007 +1,3016 @@
-/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/codeCache.hpp"
-#include "code/compiledIC.hpp"
-#include "code/dependencies.hpp"
-#include "code/nmethod.hpp"
-#include "code/scopeDesc.hpp"
-#include "compiler/abstractCompiler.hpp"
-#include "compiler/compileBroker.hpp"
-#include "compiler/compileLog.hpp"
-#include "compiler/compilerOracle.hpp"
-#include "compiler/disassembler.hpp"
-#include "interpreter/bytecode.hpp"
-#include "oops/methodData.hpp"
-#include "prims/jvmtiRedefineClassesTrace.hpp"
-#include "prims/jvmtiImpl.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/sweeper.hpp"
-#include "utilities/dtrace.hpp"
-#include "utilities/events.hpp"
-#include "utilities/xmlstream.hpp"
-#include "utilities/debug.hpp"
-#ifdef SHARK
-#include "shark/sharkCompiler.hpp"
-#endif
-#ifdef GRAAL
-#include "graal/graalJavaAccess.hpp"
-#endif
-
-#ifdef DTRACE_ENABLED
-
-// Only bother with this argument setup if dtrace is available
-
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
-  const char*, int, const char*, int, const char*, int, void*, size_t);
-
-HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
-  char*, int, char*, int, char*, int);
-
-#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
-  {                                                                       \
-    Method* m = (method);                                                 \
-    if (m != NULL) {                                                      \
-      Symbol* klass_name = m->klass_name();                               \
-      Symbol* name = m->name();                                           \
-      Symbol* signature = m->signature();                                 \
-      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
-        klass_name->bytes(), klass_name->utf8_length(),                   \
-        name->bytes(), name->utf8_length(),                               \
-        signature->bytes(), signature->utf8_length());                    \
-    }                                                                     \
-  }
-#else /* USDT2 */
-#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
-  {                                                                       \
-    Method* m = (method);                                                 \
-    if (m != NULL) {                                                      \
-      Symbol* klass_name = m->klass_name();                               \
-      Symbol* name = m->name();                                           \
-      Symbol* signature = m->signature();                                 \
-      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
-        (char *) klass_name->bytes(), klass_name->utf8_length(),                   \
-        (char *) name->bytes(), name->utf8_length(),                               \
-        (char *) signature->bytes(), signature->utf8_length());                    \
-    }                                                                     \
-  }
-#endif /* USDT2 */
-
-#else //  ndef DTRACE_ENABLED
-
-#define DTRACE_METHOD_UNLOAD_PROBE(method)
-
-#endif
-
-bool nmethod::is_compiled_by_c1() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
-  return compiler()->is_c1();
-}
-bool nmethod::is_compiled_by_c2() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
-  return compiler()->is_c2();
-}
-bool nmethod::is_compiled_by_shark() const {
-  if (is_native_method()) return false;
-  assert(compiler() != NULL, "must be");
-  return compiler()->is_shark();
-}
-
-
-
-//---------------------------------------------------------------------------------
-// NMethod statistics
-// They are printed under various flags, including:
-//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
-// (In the latter two cases, they, like other stats, are printed to the log only.)
-
-// These variables are put into one block to reduce relocations
-// and make it simpler to print from the debugger.
-static
-struct nmethod_stats_struct {
-  int nmethod_count;
-  int total_size;
-  int relocation_size;
-  int consts_size;
-  int insts_size;
-  int stub_size;
-  int scopes_data_size;
-  int scopes_pcs_size;
-  int dependencies_size;
-  int handler_table_size;
-  int nul_chk_table_size;
-  int oops_size;
-
-  void note_nmethod(nmethod* nm) {
-    nmethod_count += 1;
-    total_size          += nm->size();
-    relocation_size     += nm->relocation_size();
-    consts_size         += nm->consts_size();
-    insts_size          += nm->insts_size();
-    stub_size           += nm->stub_size();
-    oops_size           += nm->oops_size();
-    scopes_data_size    += nm->scopes_data_size();
-    scopes_pcs_size     += nm->scopes_pcs_size();
-    dependencies_size   += nm->dependencies_size();
-    handler_table_size  += nm->handler_table_size();
-    nul_chk_table_size  += nm->nul_chk_table_size();
-  }
-  void print_nmethod_stats() {
-    if (nmethod_count == 0)  return;
-    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
-    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
-    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
-    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
-    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
-    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
-    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
-    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
-    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
-    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
-    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
-    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
-  }
-
-  int native_nmethod_count;
-  int native_total_size;
-  int native_relocation_size;
-  int native_insts_size;
-  int native_oops_size;
-  void note_native_nmethod(nmethod* nm) {
-    native_nmethod_count += 1;
-    native_total_size       += nm->size();
-    native_relocation_size  += nm->relocation_size();
-    native_insts_size       += nm->insts_size();
-    native_oops_size        += nm->oops_size();
-  }
-  void print_native_nmethod_stats() {
-    if (native_nmethod_count == 0)  return;
-    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
-    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
-    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
-    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
-    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
-  }
-
-  int pc_desc_resets;   // number of resets (= number of caches)
-  int pc_desc_queries;  // queries to nmethod::find_pc_desc
-  int pc_desc_approx;   // number of those which have approximate true
-  int pc_desc_repeats;  // number of _pc_descs[0] hits
-  int pc_desc_hits;     // number of LRU cache hits
-  int pc_desc_tests;    // total number of PcDesc examinations
-  int pc_desc_searches; // total number of quasi-binary search steps
-  int pc_desc_adds;     // number of LRU cache insertions
-
-  void print_pc_stats() {
-    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
-                  pc_desc_queries,
-                  (double)(pc_desc_tests + pc_desc_searches)
-                  / pc_desc_queries);
-    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
-                  pc_desc_resets,
-                  pc_desc_queries, pc_desc_approx,
-                  pc_desc_repeats, pc_desc_hits,
-                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
-  }
-} nmethod_stats;
-
-
-//---------------------------------------------------------------------------------
-
-
-ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
-  assert(pc != NULL, "Must be non null");
-  assert(exception.not_null(), "Must be non null");
-  assert(handler != NULL, "Must be non null");
-
-  _count = 0;
-  _exception_type = exception->klass();
-  _next = NULL;
-
-  add_address_and_handler(pc,handler);
-}
-
-
-address ExceptionCache::match(Handle exception, address pc) {
-  assert(pc != NULL,"Must be non null");
-  assert(exception.not_null(),"Must be non null");
-  if (exception->klass() == exception_type()) {
-    return (test_address(pc));
-  }
-
-  return NULL;
-}
-
-
-bool ExceptionCache::match_exception_with_space(Handle exception) {
-  assert(exception.not_null(),"Must be non null");
-  if (exception->klass() == exception_type() && count() < cache_size) {
-    return true;
-  }
-  return false;
-}
-
-
-address ExceptionCache::test_address(address addr) {
-  for (int i=0; i<count(); i++) {
-    if (pc_at(i) == addr) {
-      return handler_at(i);
-    }
-  }
-  return NULL;
-}
-
-
-bool ExceptionCache::add_address_and_handler(address addr, address handler) {
-  if (test_address(addr) == handler) return true;
-  if (count() < cache_size) {
-    set_pc_at(count(),addr);
-    set_handler_at(count(), handler);
-    increment_count();
-    return true;
-  }
-  return false;
-}
-
-
-// Private methods for handling the exception cache.
-// These are used to manipulate the exception cache directly.
-ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
-  ExceptionCache* ec = exception_cache();
-  while (ec != NULL) {
-    if (ec->match_exception_with_space(exception)) {
-      return ec;
-    }
-    ec = ec->next();
-  }
-  return NULL;
-}
-
-
-//-----------------------------------------------------------------------------
-
-
-// Helper used by both find_pc_desc methods.
-static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
-  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
-  if (!approximate)
-    return pc->pc_offset() == pc_offset;
-  else
-    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
-}
-
-void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
-  if (initial_pc_desc == NULL) {
-    _pc_descs[0] = NULL; // native method; no PcDescs at all
-    return;
-  }
-  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
-  // reset the cache by filling it with benign (non-null) values
-  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
-  for (int i = 0; i < cache_size; i++)
-    _pc_descs[i] = initial_pc_desc;
-}
-
-PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
-  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
-  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
-
-  // Note: one might think that caching the most recently
-  // read value separately would be a win, but one would be
-  // wrong.  When many threads are updating it, the cache
-  // line it's in would bounce between caches, negating
-  // any benefit.
-
-  // In order to prevent race conditions, do not load cache elements
-  // repeatedly; use a local copy instead:
-  PcDesc* res;
-
-  // Step one:  Check the most recently added value.
-  res = _pc_descs[0];
-  if (res == NULL) return NULL;  // native method; no PcDescs at all
-  if (match_desc(res, pc_offset, approximate)) {
-    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
-    return res;
-  }
-
-  // Step two:  Check the rest of the LRU cache.
-  for (int i = 1; i < cache_size; ++i) {
-    res = _pc_descs[i];
-    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
-    if (match_desc(res, pc_offset, approximate)) {
-      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
-      return res;
-    }
-  }
-
-  // Report failure.
-  return NULL;
-}
-
-void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
-  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
-  // Update the LRU cache by shifting pc_desc forward.
-  for (int i = 0; i < cache_size; i++)  {
-    PcDesc* next = _pc_descs[i];
-    _pc_descs[i] = pc_desc;
-    pc_desc = next;
-  }
-}
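-
-// Annotation added for this write-up (not original source): an illustrative
-// trace of the LRU update above, with cache_size == 4 and contents
-// [A, B, C, D] (most recent first): add_pc_desc(E) shifts every slot down,
-// yielding [E, A, B, C] and dropping D, so find_pc_desc() can satisfy
-// repeat queries from slot 0 without searching.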
-
-// adjust pcs_size so that it is a multiple of both oopSize and
-// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
-// of oopSize, then 2*sizeof(PcDesc) is)
-static int adjust_pcs_size(int pcs_size) {
-  int nsize = round_to(pcs_size,   oopSize);
-  if ((nsize % sizeof(PcDesc)) != 0) {
-    nsize = pcs_size + sizeof(PcDesc);
-  }
-  assert((nsize % oopSize) == 0, "correct alignment");
-  return nsize;
-}
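-
-// Worked example (annotation for this write-up, not original source):
-// assume oopSize == 8 and sizeof(PcDesc) == 12, so 2*sizeof(PcDesc) == 24
-// is a multiple of oopSize. For pcs_size == 36 (three PcDescs):
-// round_to(36, 8) == 40 and 40 % 12 != 0, so nsize becomes 36 + 12 == 48,
-// which is a multiple of both 8 and 12, satisfying the assert.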
-
-//-----------------------------------------------------------------------------
-
-
-void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
-  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
-  assert(new_entry != NULL,"Must be non null");
-  assert(new_entry->next() == NULL, "Must be null");
-
-  if (exception_cache() != NULL) {
-    new_entry->set_next(exception_cache());
-  }
-  set_exception_cache(new_entry);
-}
-
-void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
-  ExceptionCache* prev = NULL;
-  ExceptionCache* curr = exception_cache();
-  assert(curr != NULL, "nothing to remove");
-  // find the previous and next entry of ec
-  while (curr != ec) {
-    prev = curr;
-    curr = curr->next();
-    assert(curr != NULL, "ExceptionCache not found");
-  }
-  // now: curr == ec
-  ExceptionCache* next = curr->next();
-  if (prev == NULL) {
-    set_exception_cache(next);
-  } else {
-    prev->set_next(next);
-  }
-  delete curr;
-}
-
-
-// Public methods for accessing the exception cache.
-address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
-  // We never grab a lock to read the exception cache, so we may
-  // have false negatives. This is okay, as it can only happen during
-  // the first few exception lookups for a given nmethod.
-  ExceptionCache* ec = exception_cache();
-  while (ec != NULL) {
-    address ret_val;
-    if ((ret_val = ec->match(exception,pc)) != NULL) {
-      return ret_val;
-    }
-    ec = ec->next();
-  }
-  return NULL;
-}
-
-
-void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
-  // There are potential race conditions during exception cache updates, so we
-  // must own the ExceptionCache_lock before doing ANY modifications. Because
-  // we don't lock during reads, it is possible to have several threads attempt
-  // to update the cache with the same data. We need to check for already inserted
-  // copies of the current data before adding it.
-
-  MutexLocker ml(ExceptionCache_lock);
-  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
-
-  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
-    target_entry = new ExceptionCache(exception,pc,handler);
-    add_exception_cache_entry(target_entry);
-  }
-}
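-
-// Annotation added for this write-up (not original source): taken together,
-// handler_for_exception_and_pc() and add_handler_for_exception_and_pc()
-// implement a lock-free-read, locked-write cache: readers walk the list
-// without the ExceptionCache_lock and may miss a concurrently added entry
-// (a benign false negative), while writers serialize on the lock and
-// re-probe via exception_cache_entry_for_exception() before allocating a
-// new ExceptionCache node, so racing writers do not insert duplicates.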
-
-
-//-------------end of code for ExceptionCache--------------
-
-
-int nmethod::total_size() const {
-  return
-    consts_size()        +
-    insts_size()         +
-    stub_size()          +
-    scopes_data_size()   +
-    scopes_pcs_size()    +
-    handler_table_size() +
-    nul_chk_table_size();
-}
-
-const char* nmethod::compile_kind() const {
-  if (is_osr_method())     return "osr";
-  if (method() != NULL && is_native_method())  return "c2n";
-  return NULL;
-}
-
-// Fill in default values for various flag fields
-void nmethod::init_defaults() {
-  _state                      = alive;
-  _marked_for_reclamation     = 0;
-  _has_flushed_dependencies   = 0;
-  _speculatively_disconnected = 0;
-  _has_unsafe_access          = 0;
-  _has_method_handle_invokes  = 0;
-  _lazy_critical_native       = 0;
-  _has_wide_vectors           = 0;
-  _marked_for_deoptimization  = 0;
-  _lock_count                 = 0;
-  _stack_traversal_mark       = 0;
-  _unload_reported            = false;           // jvmti state
-
-#ifdef ASSERT
-  _oops_are_stale             = false;
-#endif
-
-  _oops_do_mark_link       = NULL;
-  _jmethod_id              = NULL;
-  _osr_link                = NULL;
-  _scavenge_root_link      = NULL;
-  _scavenge_root_state     = 0;
-  _saved_nmethod_link      = NULL;
-  _compiler                = NULL;
-#ifdef GRAAL
-  _graal_installed_code   = NULL;
-#endif
-#ifdef HAVE_DTRACE_H
-  _trap_offset             = 0;
-#endif // def HAVE_DTRACE_H
-}
-
-
-nmethod* nmethod::new_native_nmethod(methodHandle method,
-  int compile_id,
-  CodeBuffer *code_buffer,
-  int vep_offset,
-  int frame_complete,
-  int frame_size,
-  ByteSize basic_lock_owner_sp_offset,
-  ByteSize basic_lock_sp_offset,
-  OopMapSet* oop_maps) {
-  code_buffer->finalize_oop_references(method);
-  // create nmethod
-  nmethod* nm = NULL;
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    CodeOffsets offsets;
-    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
-    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-    nm = new (native_nmethod_size)
-      nmethod(method(), native_nmethod_size, compile_id, &offsets,
-              code_buffer, frame_size,
-              basic_lock_owner_sp_offset, basic_lock_sp_offset,
-              oop_maps);
-    if (nm != NULL)  nmethod_stats.note_native_nmethod(nm);
-    if (PrintAssembly && nm != NULL)
-      Disassembler::decode(nm);
-  }
-  // verify nmethod
-  debug_only(if (nm) nm->verify();) // might block
-
-  if (nm != NULL) {
-    nm->log_new_nmethod();
-  }
-
-  return nm;
-}
-
-#ifdef HAVE_DTRACE_H
-nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
-                                     CodeBuffer *code_buffer,
-                                     int vep_offset,
-                                     int trap_offset,
-                                     int frame_complete,
-                                     int frame_size) {
-  code_buffer->finalize_oop_references(method);
-  // create nmethod
-  nmethod* nm = NULL;
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    CodeOffsets offsets;
-    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
-    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
-    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-
-    nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size);
-
-    if (nm != NULL)  nmethod_stats.note_nmethod(nm);
-    if (PrintAssembly && nm != NULL)
-      Disassembler::decode(nm);
-  }
-  // verify nmethod
-  debug_only(if (nm) nm->verify();) // might block
-
-  if (nm != NULL) {
-    nm->log_new_nmethod();
-  }
-
-  return nm;
-}
-
-#endif // def HAVE_DTRACE_H
-
-nmethod* nmethod::new_nmethod(methodHandle method,
-  int compile_id,
-  int entry_bci,
-  CodeOffsets* offsets,
-  int orig_pc_offset,
-  DebugInformationRecorder* debug_info,
-  Dependencies* dependencies,
-  CodeBuffer* code_buffer, int frame_size,
-  OopMapSet* oop_maps,
-  ExceptionHandlerTable* handler_table,
-  ImplicitExceptionTable* nul_chk_table,
-  AbstractCompiler* compiler,
-  int comp_level
-#ifdef GRAAL
-  , Handle installed_code
-#endif
-)
-{
-  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
-  code_buffer->finalize_oop_references(method);
-  // create nmethod
-  nmethod* nm = NULL;
-  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    int nmethod_size =
-      allocation_size(code_buffer, sizeof(nmethod))
-      + adjust_pcs_size(debug_info->pcs_size())
-      + round_to(dependencies->size_in_bytes() , oopSize)
-      + round_to(handler_table->size_in_bytes(), oopSize)
-      + round_to(nul_chk_table->size_in_bytes(), oopSize)
-      + round_to(debug_info->data_size()       , oopSize);
-    nm = new (nmethod_size)
-      nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
-              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
-              oop_maps,
-              handler_table,
-              nul_chk_table,
-              compiler,
-              comp_level
-#ifdef GRAAL
-              , installed_code
-#endif
-              );
-    if (nm != NULL) {
-      // To make dependency checking during class loading fast, record
-      // the nmethod dependencies in the classes it is dependent on.
-      // This allows the dependency checking code to simply walk the
-      // class hierarchy above the loaded class, checking only nmethods
-      // which are dependent on those classes.  The slow way is to
-      // check every nmethod for dependencies which makes it linear in
-      // the number of methods compiled.  For applications with a lot of
-      // classes the slow way is too slow.
-      for (Dependencies::DepStream deps(nm); deps.next(); ) {
-        Klass* klass = deps.context_type();
-        if (klass == NULL)  continue;  // ignore things like evol_method
-
-        // record this nmethod as dependent on this klass
-        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
-      }
-    }
-    if (nm != NULL)  nmethod_stats.note_nmethod(nm);
-    if (PrintAssembly && nm != NULL)
-      Disassembler::decode(nm);
-  }
-
-  // verify nmethod
-  debug_only(if (nm) nm->verify();) // might block
-
-  if (nm != NULL) {
-    nm->log_new_nmethod();
-  }
-
-  // done
-  return nm;
-}
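-
-// Annotation added for this write-up (not original source): the DepStream
-// loop above inverts the dependency relation. Rather than answering "which
-// nmethods depend on class K?" by scanning every nmethod, each context
-// Klass keeps its own list of dependent nmethods (populated here via
-// InstanceKlass::add_dependent_nmethod), so dependency checking at class
-// load time only walks the hierarchy above the newly loaded class.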
-
-
-// For native wrappers
-nmethod::nmethod(
-  Method* method,
-  int nmethod_size,
-  int compile_id,
-  CodeOffsets* offsets,
-  CodeBuffer* code_buffer,
-  int frame_size,
-  ByteSize basic_lock_owner_sp_offset,
-  ByteSize basic_lock_sp_offset,
-  OopMapSet* oop_maps )
-  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
-  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
-  _native_basic_lock_sp_offset(basic_lock_sp_offset)
-{
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-    assert_locked_or_safepoint(CodeCache_lock);
-
-    init_defaults();
-    _method                  = method;
-    _entry_bci               = InvocationEntryBci;
-    // We have no exception handler or deopt handler; make the
-    // values something that will never match a pc, like the nmethod vtable entry.
-    _exception_offset        = 0;
-    _deoptimize_offset       = 0;
-    _deoptimize_mh_offset    = 0;
-    _orig_pc_offset          = 0;
-
-    _consts_offset           = data_offset();
-    _stub_offset             = data_offset();
-    _oops_offset             = data_offset();
-    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
-    _scopes_pcs_offset       = _scopes_data_offset;
-    _dependencies_offset     = _scopes_pcs_offset;
-    _handler_table_offset    = _dependencies_offset;
-    _nul_chk_table_offset    = _handler_table_offset;
-    _nmethod_end_offset      = _nul_chk_table_offset;
-    _compile_id              = compile_id;
-    _comp_level              = CompLevel_none;
-    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
-    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point         = NULL;
-    _exception_cache         = NULL;
-    _pc_desc_cache.reset_to(NULL);
-
-    code_buffer->copy_values_to(this);
-    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
-      CodeCache::add_scavenge_root_nmethod(this);
-    }
-    debug_only(verify_scavenge_root_oops());
-    CodeCache::commit(this);
-  }
-
-  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
-    ttyLocker ttyl;  // keep the following output all in one block
-    // This output goes directly to the tty, not the compiler log.
-    // To enable tools to match it up with the compilation activity,
-    // be sure to tag this tty output with the compile ID.
-    if (xtty != NULL) {
-      xtty->begin_head("print_native_nmethod");
-      xtty->method(_method);
-      xtty->stamp();
-      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
-    }
-    // print the header part first
-    print();
-    // then print the requested information
-    if (PrintNativeNMethods) {
-      print_code();
-      if (oop_maps != NULL) {
-        oop_maps->print();
-      }
-    }
-    if (PrintRelocations) {
-      print_relocations();
-    }
-    if (xtty != NULL) {
-      xtty->tail("print_native_nmethod");
-    }
-  }
-}
-
-// For dtrace wrappers
-#ifdef HAVE_DTRACE_H
-nmethod::nmethod(
-  Method* method,
-  int nmethod_size,
-  CodeOffsets* offsets,
-  CodeBuffer* code_buffer,
-  int frame_size)
-  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
-  _native_receiver_sp_offset(in_ByteSize(-1)),
-  _native_basic_lock_sp_offset(in_ByteSize(-1))
-{
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-    assert_locked_or_safepoint(CodeCache_lock);
-
-    init_defaults();
-    _method                  = method;
-    _entry_bci               = InvocationEntryBci;
-    // We have no exception handler or deopt handler; make the
-    // values something that will never match a pc, like the nmethod vtable entry.
-    _exception_offset        = 0;
-    _deoptimize_offset       = 0;
-    _deoptimize_mh_offset    = 0;
-    _unwind_handler_offset   = -1;
-    _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
-    _orig_pc_offset          = 0;
-    _consts_offset           = data_offset();
-    _stub_offset             = data_offset();
-    _oops_offset             = data_offset();
-    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
-    _scopes_pcs_offset       = _scopes_data_offset;
-    _dependencies_offset     = _scopes_pcs_offset;
-    _handler_table_offset    = _dependencies_offset;
-    _nul_chk_table_offset    = _handler_table_offset;
-    _nmethod_end_offset      = _nul_chk_table_offset;
-    _compile_id              = 0;  // default
-    _comp_level              = CompLevel_none;
-    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
-    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point         = NULL;
-    _exception_cache         = NULL;
-    _pc_desc_cache.reset_to(NULL);
-
-    code_buffer->copy_values_to(this);
-    debug_only(verify_scavenge_root_oops());
-    CodeCache::commit(this);
-  }
-
-  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
-    ttyLocker ttyl;  // keep the following output all in one block
-    // This output goes directly to the tty, not the compiler log.
-    // To enable tools to match it up with the compilation activity,
-    // be sure to tag this tty output with the compile ID.
-    if (xtty != NULL) {
-      xtty->begin_head("print_dtrace_nmethod");
-      xtty->method(_method);
-      xtty->stamp();
-      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
-    }
-    // print the header part first
-    print();
-    // then print the requested information
-    if (PrintNMethods) {
-      print_code();
-    }
-    if (PrintRelocations) {
-      print_relocations();
-    }
-    if (xtty != NULL) {
-      xtty->tail("print_dtrace_nmethod");
-    }
-  }
-}
-#endif // def HAVE_DTRACE_H
-
-void* nmethod::operator new(size_t size, int nmethod_size) {
-  // Always leave some room in the CodeCache for I2C/C2I adapters
-  if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) return NULL;
-  return CodeCache::allocate(nmethod_size);
-}
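-
-// Annotation added for this write-up (not original source): unlike the
-// blob operators earlier in this changeset, which call fatal() on failure,
-// this operator returns NULL once the largest free block drops below
-// CodeCacheMinimumFreeSpace. The new_*nmethod() factories all check for a
-// NULL result, so a compilation fails softly while space is kept in
-// reserve for the mandatory I2C/C2I adapters.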
-
-
-nmethod::nmethod(
-  Method* method,
-  int nmethod_size,
-  int compile_id,
-  int entry_bci,
-  CodeOffsets* offsets,
-  int orig_pc_offset,
-  DebugInformationRecorder* debug_info,
-  Dependencies* dependencies,
-  CodeBuffer *code_buffer,
-  int frame_size,
-  OopMapSet* oop_maps,
-  ExceptionHandlerTable* handler_table,
-  ImplicitExceptionTable* nul_chk_table,
-  AbstractCompiler* compiler,
-  int comp_level
-#ifdef GRAAL
-  , Handle installed_code
-#endif
-  )
-  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
-  _native_receiver_sp_offset(in_ByteSize(-1)),
-  _native_basic_lock_sp_offset(in_ByteSize(-1))
-{
-  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-    assert_locked_or_safepoint(CodeCache_lock);
-
-    init_defaults();
-    _method                  = method;
-    _entry_bci               = entry_bci;
-    _compile_id              = compile_id;
-    _comp_level              = comp_level;
-    _compiler                = compiler;
-    _orig_pc_offset          = orig_pc_offset;
-
-    // Section offsets
-    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
-    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
-
-#ifdef GRAAL
-    _graal_installed_code = installed_code();
-
-    // Graal produces no (!) stub section
-    if (offsets->value(CodeOffsets::Exceptions) != -1) {
-      _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
-    } else {
-      _exception_offset = -1;
-    }
-    if (offsets->value(CodeOffsets::Deopt) != -1) {
-      _deoptimize_offset       = code_offset()          + offsets->value(CodeOffsets::Deopt);
-    } else {
-      _deoptimize_offset = -1;
-    }
-    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
-      _deoptimize_mh_offset  = code_offset()          + offsets->value(CodeOffsets::DeoptMH);
-    } else {
-      _deoptimize_mh_offset  = -1;
-    }
-#else
-    // Exception handler and deopt handler are in the stub section
-    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
-    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
-
-    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
-    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
-    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
-      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
-    } else {
-      _deoptimize_mh_offset  = -1;
-    }
-#endif
-    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
-      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
-    } else {
-      _unwind_handler_offset = -1;
-    }
-
-    _oops_offset             = data_offset();
-    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);
-
-    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
-    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
-    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
-    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
-    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
-
-    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
-    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
-    _exception_cache         = NULL;
-    _pc_desc_cache.reset_to(scopes_pcs_begin());
-
-    // Copy contents of ScopeDescRecorder to nmethod
-    code_buffer->copy_values_to(this);
-    debug_info->copy_to(this);
-    dependencies->copy_to(this);
-    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
-      CodeCache::add_scavenge_root_nmethod(this);
-    }
-    debug_only(verify_scavenge_root_oops());
-
-    CodeCache::commit(this);
-
-    // Copy contents of ExceptionHandlerTable to nmethod
-    handler_table->copy_to(this);
-    nul_chk_table->copy_to(this);
-
-    // We use the entry point information to find out whether a method is
-    // static or non-static.
-    assert(compiler->is_c2() ||
-           _method->is_static() == (entry_point() == _verified_entry_point),
-           " entry points must be same for static methods and vice versa");
-  }
-
-  bool printnmethods = PrintNMethods
-    || CompilerOracle::should_print(_method)
-    || CompilerOracle::has_option_string(_method, "PrintNMethods");
-  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
-    print_nmethod(printnmethods);
-  }
-}
-
-
-// Print a short set of xml attributes to identify this nmethod.  The
-// output should be embedded in some other element.
-void nmethod::log_identity(xmlStream* log) const {
-  log->print(" compile_id='%d'", compile_id());
-  const char* nm_kind = compile_kind();
-  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
-  if (compiler() != NULL) {
-    log->print(" compiler='%s'", compiler()->name());
-  }
-  if (TieredCompilation) {
-    log->print(" level='%d'", comp_level());
-  }
-}
-
-
-#define LOG_OFFSET(log, name)                    \
-  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
-    log->print(" " XSTR(name) "_offset='%d'"    , \
-               (intptr_t)name##_begin() - (intptr_t)this)
-
-
-void nmethod::log_new_nmethod() const {
-  if (LogCompilation && xtty != NULL) {
-    ttyLocker ttyl;
-    HandleMark hm;
-    xtty->begin_elem("nmethod");
-    log_identity(xtty);
-    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
-    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);
-
-    LOG_OFFSET(xtty, relocation);
-    LOG_OFFSET(xtty, consts);
-    LOG_OFFSET(xtty, insts);
-    LOG_OFFSET(xtty, stub);
-    LOG_OFFSET(xtty, scopes_data);
-    LOG_OFFSET(xtty, scopes_pcs);
-    LOG_OFFSET(xtty, dependencies);
-    LOG_OFFSET(xtty, handler_table);
-    LOG_OFFSET(xtty, nul_chk_table);
-    LOG_OFFSET(xtty, oops);
-
-    xtty->method(method());
-    xtty->stamp();
-    xtty->end_elem();
-  }
-}
-
-#undef LOG_OFFSET
-
-
-// Print out more verbose output usually for a newly created nmethod.
-void nmethod::print_on(outputStream* st, const char* msg) const {
-  if (st != NULL) {
-    ttyLocker ttyl;
-    if (WizardMode) {
-      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
-      st->print_cr(" (" INTPTR_FORMAT ")", this);
-    } else {
-      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
-    }
-  }
-}
-
-
-void nmethod::print_nmethod(bool printmethod) {
-  ttyLocker ttyl;  // keep the following output all in one block
-  if (xtty != NULL) {
-    xtty->begin_head("print_nmethod");
-    xtty->stamp();
-    xtty->end_head();
-  }
-  // print the header part first
-  print();
-  // then print the requested information
-  if (printmethod) {
-    print_code();
-    print_pcs();
-    if (oop_maps()) {
-      oop_maps()->print();
-    }
-  }
-  if (PrintDebugInfo) {
-    print_scopes();
-  }
-  if (PrintRelocations) {
-    print_relocations();
-  }
-  if (PrintDependencies) {
-    print_dependencies();
-  }
-  if (PrintExceptionHandlers) {
-    print_handler_table();
-    print_nul_chk_table();
-  }
-  if (xtty != NULL) {
-    xtty->tail("print_nmethod");
-  }
-}
-
-
-// Promote one word from an assembly-time handle to a live embedded oop.
-inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
-  if (handle == NULL ||
-      // As a special case, IC oops are initialized to 1 or -1.
-      handle == (jobject) Universe::non_oop_word()) {
-    (*dest) = (oop) handle;
-  } else {
-    (*dest) = JNIHandles::resolve_non_null(handle);
-  }
-}
-
-
-// Have to have the same name because it's called by a template
-void nmethod::copy_values(GrowableArray<jobject>* array) {
-  int length = array->length();
-  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
-  oop* dest = oops_begin();
-  for (int index = 0 ; index < length; index++) {
-    initialize_immediate_oop(&dest[index], array->at(index));
-  }
-
-  // Now we can fix up all the oops in the code.  We need to do this
-  // in the code because the assembler uses jobjects as placeholders.
-  // The code and relocations have already been initialized by the
-  // CodeBlob constructor, so it is valid even at this early point to
-  // iterate over relocations and patch the code.
-  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
-}
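-
-// Annotation added for this write-up (not original source): at assembly
-// time the instruction stream holds jobject handles (plus the 1/-1
-// non_oop_word sentinels used by inline caches) wherever an oop will live.
-// copy_values() resolves those handles into the oops section, and the
-// fix_oop_relocations(..., /*initialize_immediates=*/ true) call rewrites
-// each immediate oop site in the code to the resolved value.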
-
-void nmethod::copy_values(GrowableArray<Metadata*>* array) {
-  int length = array->length();
-  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
-  Metadata** dest = metadata_begin();
-  for (int index = 0 ; index < length; index++) {
-    dest[index] = array->at(index);
-  }
-}
-
-bool nmethod::is_at_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::poll_return_type)
-      return true;
-  }
-  return false;
-}
-
-
-bool nmethod::is_at_poll_or_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    relocInfo::relocType t = iter.type();
-    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
-      return true;
-  }
-  return false;
-}
-
-
-void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
-  // re-patch all oop-bearing instructions, just in case some oops moved
-  RelocIterator iter(this, begin, end);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* reloc = iter.oop_reloc();
-      if (initialize_immediates && reloc->oop_is_immediate()) {
-        oop* dest = reloc->oop_addr();
-        initialize_immediate_oop(dest, (jobject) *dest);
-      }
-      // Refresh the oop-related bits of this instruction.
-      reloc->fix_oop_relocation();
-    } else if (iter.type() == relocInfo::metadata_type) {
-      metadata_Relocation* reloc = iter.metadata_reloc();
-      reloc->fix_metadata_relocation();
-    }
-
-    // There must not be any interfering patches or breakpoints.
-    assert(!(iter.type() == relocInfo::breakpoint_type
-             && iter.breakpoint_reloc()->active()),
-           "no active breakpoint");
-  }
-}
-
-
-void nmethod::verify_oop_relocations() {
-  // Ensure that the code matches the current oop values
-  RelocIterator iter(this, NULL, NULL);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* reloc = iter.oop_reloc();
-      if (!reloc->oop_is_immediate()) {
-        reloc->verify_oop_relocation();
-      }
-    }
-  }
-}
-
-
-ScopeDesc* nmethod::scope_desc_at(address pc) {
-  PcDesc* pd = pc_desc_at(pc);
-  guarantee(pd != NULL, "scope must be present");
-  return new ScopeDesc(this, pd->scope_decode_offset(),
-                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
-                       pd->return_oop());
-}
-
-
-void nmethod::clear_inline_caches() {
-  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
-  if (is_zombie()) {
-    return;
-  }
-
-  RelocIterator iter(this);
-  while (iter.next()) {
-    iter.reloc()->clear_inline_cache();
-  }
-}
-
-
-void nmethod::cleanup_inline_caches() {
-
-  assert_locked_or_safepoint(CompiledIC_lock);
-
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (!is_in_use()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // This means that the low_boundary is going to be a little too high.
-    // This shouldn't matter, since oops of non-entrant methods are never used.
-    // In fact, why are we bothering to look at oops in a non-entrant method??
-  }
-
-  // Find all calls in an nmethod and clear the ones that point to zombie methods
-  ResourceMark rm;
-  RelocIterator iter(this, low_boundary);
-  while(iter.next()) {
-    switch(iter.type()) {
-      case relocInfo::virtual_call_type:
-      case relocInfo::opt_virtual_call_type: {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
-        // Ok, to lookup references to zombies here
-        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
-        }
-        break;
-      }
-      case relocInfo::static_call_type: {
-        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
-        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
-        }
-        break;
-      }
-    }
-  }
-}
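-
-// Annotation added for this write-up (not original source): resetting an
-// inline cache to the "clean" state means the next call through that site
-// re-resolves its target via the runtime instead of jumping into a zombie
-// or not-entrant nmethod that may be about to be reclaimed.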
-
-// This is a private interface with the sweeper.
-void nmethod::mark_as_seen_on_stack() {
-  assert(is_not_entrant(), "must be a non-entrant method");
-  // Set the traversal mark to ensure that the sweeper does 2
-  // cleaning passes before moving to zombie.
-  set_stack_traversal_mark(NMethodSweeper::traversal_count());
-}
-
-// Tell if a non-entrant method can be converted to a zombie (i.e.,
-// there are no activations on the stack, not in use by the VM,
-// and not in use by the ServiceThread)
-bool nmethod::can_not_entrant_be_converted() {
-  assert(is_not_entrant(), "must be a non-entrant method");
-
-  // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
-  // count can be greater than the stack traversal count before it hits the
-  // nmethod for the second time.
-  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
-         !is_locked_by_vm();
-}
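-
-// Worked example (annotation for this write-up, not original source): if
-// mark_as_seen_on_stack() recorded stack_traversal_mark() == 5, then
-// 5 + 1 < NMethodSweeper::traversal_count() first holds at traversal 7,
-// i.e. only after two further sweeps have completed without the nmethod
-// being seen on any stack.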
-
-void nmethod::inc_decompile_count() {
-#ifndef GRAAL
-  if (!is_compiled_by_c2()) return;
-#endif
-  // Could be gated by ProfileTraps, but do not bother...
-  Method* m = method();
-  if (m == NULL)  return;
-  MethodData* mdo = m->method_data();
-  if (mdo == NULL)  return;
-  // There is a benign race here.  See comments in methodData.hpp.
-  mdo->inc_decompile_count();
-}
-
-void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
-
-  post_compiled_method_unload();
-
-  // Since this nmethod is being unloaded, make sure that dependencies
-  // recorded in instanceKlasses get flushed and pass non-NULL closure to
-  // indicate that this work is being done during a GC.
-  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
-  assert(is_alive != NULL, "Should be non-NULL");
-  // A non-NULL is_alive closure indicates that this is being called during GC.
-  flush_dependencies(is_alive);
-
-  // Break cycle between nmethod & method
-  if (TraceClassUnloading && WizardMode) {
-    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
-                  " unloadable], Method*(" INTPTR_FORMAT
-                  "), cause(" INTPTR_FORMAT ")",
-                  this, (address)_method, (address)cause);
-    if (!Universe::heap()->is_gc_active())
-      cause->klass()->print();
-  }
-  // Unlink the osr method, so we do not look this up again
-  if (is_osr_method()) {
-    invalidate_osr_method();
-  }
-  // If _method is already NULL the Method* is about to be unloaded,
-  // so we don't have to break the cycle. Note that it is possible to
-  // have the Method* live here, in case we unload the nmethod because
-  // it is pointing to some oop (other than the Method*) being unloaded.
-  if (_method != NULL) {
-    // OSR methods point to the Method*, but the Method* does not
-    // point back!
-    if (_method->code() == this) {
-      _method->clear_code(); // Break a cycle
-    }
-    _method = NULL;            // Clear the method of this dead nmethod
-  }
-
-#ifdef GRAAL
-  if (_graal_installed_code != NULL) {
-    HotSpotInstalledCode::set_nmethod(_graal_installed_code, 0);
-    _graal_installed_code = NULL;
-  }
-#endif
-
-  // Make the class unloaded - i.e., change state and notify sweeper
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  if (is_in_use()) {
-    // Transitioning directly from live to unloaded -- so
-    // we need to force a cache clean-up; remember this
-    // for later on.
-    CodeCache::set_needs_cache_clean(true);
-  }
-  _state = unloaded;
-
-  // Log the unloading.
-  log_state_change();
-
-  // The Method* is gone at this point
-  assert(_method == NULL, "Tautology");
-
-  set_osr_link(NULL);
-  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
-  NMethodSweeper::notify(this);
-}
-
-void nmethod::invalidate_osr_method() {
-  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
-  // Remove from list of active nmethods
-  if (method() != NULL)
-    method()->method_holder()->remove_osr_nmethod(this);
-  // Set entry as invalid
-  _entry_bci = InvalidOSREntryBci;
-}
-
-void nmethod::log_state_change() const {
-  if (LogCompilation) {
-    if (xtty != NULL) {
-      ttyLocker ttyl;  // keep the following output all in one block
-      if (_state == unloaded) {
-        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
-                         os::current_thread_id());
-      } else {
-        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
-                         os::current_thread_id(),
-                         (_state == zombie ? " zombie='1'" : ""));
-      }
-      log_identity(xtty);
-      xtty->stamp();
-      xtty->end_elem();
-    }
-  }
-  if (PrintCompilation && _state != unloaded) {
-    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
-  }
-}
-
-// Common functionality for both make_not_entrant and make_zombie
-bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
-  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
-  assert(!is_zombie(), "should not already be a zombie");
-
-  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
-  nmethodLocker nml(this);
-  methodHandle the_method(method());
-  No_Safepoint_Verifier nsv;
-
-  {
-    // invalidate osr nmethod before acquiring the patching lock since
-    // they both acquire leaf locks and we don't want a deadlock.
-    // This logic is equivalent to the logic below for patching the
-    // verified entry point of regular methods.
-    if (is_osr_method()) {
-      // this effectively makes the osr nmethod not entrant
-      invalidate_osr_method();
-    }
-
-    // Enter critical section.  Does not block for safepoint.
-    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
-
-    if (_state == state) {
-      // another thread already performed this transition so nothing
-      // to do, but return false to indicate this.
-      return false;
-    }
-
-#ifdef GRAAL
-    if (_graal_installed_code != NULL) {
-      HotSpotInstalledCode::set_nmethod(_graal_installed_code, 0);
-      _graal_installed_code = NULL;
-    }
-#endif
-
-    // The caller can be calling the method statically or through an inline
-    // cache call.
-    if (!is_osr_method() && !is_not_entrant()) {
-      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
-                  SharedRuntime::get_handle_wrong_method_stub());
-    }
-
-    if (is_in_use()) {
-      // It's a true state change, so mark the method as decompiled.
-      // Do it only for transition from alive.
-      inc_decompile_count();
-    }
-
-    // Change state
-    _state = state;
-
-    // Log the transition once
-    log_state_change();
-
-    // Remove nmethod from method.
-    // We need to check if both the _code and _from_compiled_code_entry_point
-    // refer to this nmethod because there is a race in setting these two fields
-    // in Method* as seen in bugid 4947125.
-    // If the vep() points to the zombie nmethod, the memory for the nmethod
-    // could be flushed and the compiler and vtable stubs could still call
-    // through it.
-    if (method() != NULL && (method()->code() == this ||
-                             method()->from_compiled_entry() == verified_entry_point())) {
-      HandleMark hm;
-      method()->clear_code();
-    }
-
-    if (state == not_entrant) {
-      mark_as_seen_on_stack();
-    }
-
-  } // leave critical region under Patching_lock
-
-  // When the nmethod becomes zombie it is no longer alive so the
-  // dependencies must be flushed.  nmethods in the not_entrant
-  // state will be flushed later when the transition to zombie
-  // happens or they get unloaded.
-  if (state == zombie) {
-    {
-      // Flushing dependencies must be done before any possible
-      // safepoint can sneak in, otherwise the oops used by the
-      // dependency logic could have become stale.
-      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      flush_dependencies(NULL);
-    }
-
-    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
-    // event and it hasn't already been reported for this nmethod then
-      // report it now. The event may have been reported earlier if the GC
-      // marked it for unloading. JvmtiDeferredEventQueue support means
-    // we no longer go to a safepoint here.
-    post_compiled_method_unload();
-
-#ifdef ASSERT
-    // It's no longer safe to access the oops section since zombie
-    // nmethods aren't scanned for GC.
-    _oops_are_stale = true;
-#endif
-  } else {
-    assert(state == not_entrant, "other cases may need to be handled differently");
-  }
-
-  if (TraceCreateZombies) {
-    tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
-  }
-
-  // Make sweeper aware that there is a zombie method that needs to be removed
-  NMethodSweeper::notify(this);
-
-  return true;
-}
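For orientation, the two public entry points that drive this transition are thin
wrappers that pick the target state; a minimal sketch, assuming the usual
declarations in nmethod.hpp rather than quoting this changeset:

  // Sketch only: select the target state and delegate.
  bool nmethod::make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
  bool nmethod::make_zombie()      { return make_not_entrant_or_zombie(zombie); }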
-
-void nmethod::flush() {
-  // Note that there are no valid oops in the nmethod anymore.
-  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
-  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
-
-  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  assert_locked_or_safepoint(CodeCache_lock);
-
-  // completely deallocate this method
-  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
-  if (PrintMethodFlushing) {
-    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
-        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
-  }
-
-  // We need to deallocate any ExceptionCache data.
-  // Note that we do not need to grab the nmethod lock for this, it
-  // better be thread safe if we're disposing of it!
-  ExceptionCache* ec = exception_cache();
-  set_exception_cache(NULL);
-  while(ec != NULL) {
-    ExceptionCache* next = ec->next();
-    delete ec;
-    ec = next;
-  }
-
-  if (on_scavenge_root_list()) {
-    CodeCache::drop_scavenge_root_nmethod(this);
-  }
-
-  if (is_speculatively_disconnected()) {
-    CodeCache::remove_saved_code(this);
-  }
-
-#ifdef SHARK
-  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
-#endif // SHARK
-
-  ((CodeBlob*)(this))->flush();
-
-  CodeCache::free(this);
-}
-
-
-//
-// Notify all classes this nmethod is dependent on that it is no
-// longer dependent. This should only be called in two situations.
-// First, when an nmethod transitions to a zombie all dependents need
-// to be cleared.  Since zombification happens at a safepoint there are no
-// synchronization issues.  The second place is a little more tricky.
-// During phase 1 of mark sweep class unloading may happen and as a
-// result some nmethods may get unloaded.  In this case the flushing
-// of dependencies must happen during phase 1 since after GC any
-// dependencies in the unloaded nmethod won't be updated, so
-// traversing the dependency information is unsafe.  In that case this
-// function is called with a non-NULL argument and it only
-// notifies instanceKlasses that are reachable.
-
-void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
-         "is_alive is non-NULL if and only if we are called during GC");
-  if (!has_flushed_dependencies()) {
-    set_has_flushed_dependencies();
-    for (Dependencies::DepStream deps(this); deps.next(); ) {
-      Klass* klass = deps.context_type();
-      if (klass == NULL)  continue;  // ignore things like evol_method
-
-      // During GC the is_alive closure is non-NULL, and is used to
-      // determine liveness of dependees that need to be updated.
-      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
-        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
-      }
-    }
-  }
-}
-
-
-// If this oop is not live, the nmethod can be unloaded.
-bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
-  assert(root != NULL, "just checking");
-  oop obj = *root;
-  if (obj == NULL || is_alive->do_object_b(obj)) {
-      return false;
-  }
-
-  // If ScavengeRootsInCode is true, an nmethod might be unloaded
-  // simply because one of its constant oops has gone dead.
-  // No actual classes need to be unloaded in order for this to occur.
-  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
-  make_unloaded(is_alive, obj);
-  return true;
-}
-
-// ------------------------------------------------------------------
-// post_compiled_method_load_event
-// new method for install_code() path
-// Transfer information from compilation to jvmti
-void nmethod::post_compiled_method_load_event() {
-
-  Method* moop = method();
-#ifndef USDT2
-  HS_DTRACE_PROBE8(hotspot, compiled__method__load,
-      moop->klass_name()->bytes(),
-      moop->klass_name()->utf8_length(),
-      moop->name()->bytes(),
-      moop->name()->utf8_length(),
-      moop->signature()->bytes(),
-      moop->signature()->utf8_length(),
-      insts_begin(), insts_size());
-#else /* USDT2 */
-  HOTSPOT_COMPILED_METHOD_LOAD(
-      (char *) moop->klass_name()->bytes(),
-      moop->klass_name()->utf8_length(),
-      (char *) moop->name()->bytes(),
-      moop->name()->utf8_length(),
-      (char *) moop->signature()->bytes(),
-      moop->signature()->utf8_length(),
-      insts_begin(), insts_size());
-#endif /* USDT2 */
-
-  if (JvmtiExport::should_post_compiled_method_load() ||
-      JvmtiExport::should_post_compiled_method_unload()) {
-    get_and_cache_jmethod_id();
-  }
-
-  if (JvmtiExport::should_post_compiled_method_load()) {
-    // Let the Service thread (which is a real Java thread) post the event
-    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
-    JvmtiDeferredEventQueue::enqueue(
-      JvmtiDeferredEvent::compiled_method_load_event(this));
-  }
-}
-
-jmethodID nmethod::get_and_cache_jmethod_id() {
-  if (_jmethod_id == NULL) {
-    // Cache the jmethod_id since it can no longer be looked up once the
-    // method itself has been marked for unloading.
-    _jmethod_id = method()->jmethod_id();
-  }
-  return _jmethod_id;
-}
-
-void nmethod::post_compiled_method_unload() {
-  if (unload_reported()) {
-    // During unloading we transition to unloaded and then to zombie
-    // and the unloading is reported during the first transition.
-    return;
-  }
-
-  assert(_method != NULL && !is_unloaded(), "just checking");
-  DTRACE_METHOD_UNLOAD_PROBE(method());
-
-  // If a JVMTI agent has enabled the CompiledMethodUnload event then
-  // post the event. Sometime later this nmethod will be made a zombie
-  // by the sweeper but the Method* will not be valid at that point.
-  // If the _jmethod_id is null then no load event was ever requested
-  // so don't bother posting the unload.  The main reason for this is
-  // that the jmethodID is a weak reference to the Method* so if
-  // it's being unloaded there's no way to look it up since the weak
-  // ref will have been cleared.
-  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
-    assert(!unload_reported(), "already unloaded");
-    JvmtiDeferredEvent event =
-      JvmtiDeferredEvent::compiled_method_unload_event(this,
-          _jmethod_id, insts_begin());
-    if (SafepointSynchronize::is_at_safepoint()) {
-      // Don't want to take the queueing lock. Add it as pending and
-      // it will get enqueued later.
-      JvmtiDeferredEventQueue::add_pending_event(event);
-    } else {
-      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
-      JvmtiDeferredEventQueue::enqueue(event);
-    }
-  }
-
-  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
-  // any time. As the nmethod is being unloaded now we mark it as
-  // having the unload event reported - this will ensure that we don't
-  // attempt to report the event in the unlikely scenario where the
-  // event is enabled at the time the nmethod is made a zombie.
-  set_unload_reported();
-}
-
-// This is called at the end of the strong tracing/marking phase of a
-// GC to unload an nmethod if it contains otherwise unreachable
-// oops.
-
-void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
-  // Make sure the oops are ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  // The RedefineClasses() API can cause the class unloading invariant
-  // to no longer be true. See jvmtiExport.hpp for details.
-  // Also, leave a debugging breadcrumb in local flag.
-  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
-  if (a_class_was_redefined) {
-    // This set of the unloading_occurred flag is done before the
-    // call to post_compiled_method_unload() so that the unloading
-    // of this nmethod is reported.
-    unloading_occurred = true;
-  }
-
-#ifdef GRAAL
-  // Follow Graal method
-  if (_graal_installed_code != NULL && can_unload(is_alive, (oop*)&_graal_installed_code, unloading_occurred)) {
-    return;
-  }
-#endif
-
-  // Exception cache
-  ExceptionCache* ec = exception_cache();
-  while (ec != NULL) {
-    Klass* ex_klass = ec->exception_type();
-    ExceptionCache* next_ec = ec->next();
-    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
-      remove_from_exception_cache(ec);
-    }
-    ec = next_ec;
-  }
-
-  // If class unloading occurred we first iterate over all inline caches and
-  // clear ICs where the cached oop is referring to an unloaded klass or method.
-  // The remaining live cached oops will be traversed in the relocInfo::oop_type
-  // iteration below.
-  if (unloading_occurred) {
-    RelocIterator iter(this, low_boundary);
-    while(iter.next()) {
-      if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
-        if (ic->is_icholder_call()) {
-          // The only exception is compiledICHolder oops which may
-          // yet be marked below. (We check this further below).
-          CompiledICHolder* cichk_oop = ic->cached_icholder();
-          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
-              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
-            continue;
-          }
-        } else {
-          Metadata* ic_oop = ic->cached_metadata();
-          if (ic_oop != NULL) {
-            if (ic_oop->is_klass()) {
-              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else if (ic_oop->is_method()) {
-              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else {
-              ShouldNotReachHere();
-            }
-          }
-        }
-        ic->set_to_clean();
-      }
-    }
-  }
-
-  // Compiled code
-  {
-    RelocIterator iter(this, low_boundary);
-    while (iter.next()) {
-      if (iter.type() == relocInfo::oop_type) {
-        oop_Relocation* r = iter.oop_reloc();
-        // In this loop, we must only traverse those oops directly embedded in
-        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
-        assert(1 == (r->oop_is_immediate()) +
-                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
-               "oop must be found in exactly one place");
-        if (r->oop_is_immediate() && r->oop_value() != NULL) {
-          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
-            return;
-          }
-        }
-      }
-    }
-  }
-
-
-  // Scopes
-  for (oop* p = oops_begin(); p < oops_end(); p++) {
-    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
-    if (can_unload(is_alive, p, unloading_occurred)) {
-      return;
-    }
-  }
-
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary, is_alive);
-}
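For context, this is driven once per alive nmethod during the weak-roots phase.
A hedged sketch of the call site, assuming the iteration macro from
codeCache.cpp (which is not part of this hunk):

  // Sketch only, not the authoritative CodeCache implementation.
  void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
    assert_locked_or_safepoint(CodeCache_lock);
    FOR_ALL_ALIVE_NMETHODS(nm) {
      nm->do_unloading(is_alive, unloading_occurred);
    }
  }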
-
-#ifdef ASSERT
-
-class CheckClass : AllStatic {
-  static BoolObjectClosure* _is_alive;
-
-  // Check class_loader is alive for this bit of metadata.
-  static void check_class(Metadata* md) {
-    Klass* klass = NULL;
-    if (md->is_klass()) {
-      klass = ((Klass*)md);
-    } else if (md->is_method()) {
-      klass = ((Method*)md)->method_holder();
-    } else if (md->is_methodData()) {
-      klass = ((MethodData*)md)->method()->method_holder();
-    } else {
-      md->print();
-      ShouldNotReachHere();
-    }
-    assert(klass->is_loader_alive(_is_alive), "must be alive");
-  }
- public:
-  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
-    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
-    _is_alive = is_alive;
-    nm->metadata_do(check_class);
-  }
-};
-
-// This is called during a safepoint so can use static data
-BoolObjectClosure* CheckClass::_is_alive = NULL;
-#endif // ASSERT
-
-
-// Processing of oop references should have been sufficient to keep
-// all strong references alive.  Any weak references should have been
-// cleared as well.  Visit all the metadata and ensure that it's
-// really alive.
-void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
-#ifdef ASSERT
-  RelocIterator iter(this, low_boundary);
-  while (iter.next()) {
-    // static_stub_Relocations may have dangling references to
-    // Method*s so trim them out here.  Otherwise it looks like
-    // compiled code is maintaining a link to dead metadata.
-    address static_call_addr = NULL;
-    if (iter.type() == relocInfo::opt_virtual_call_type) {
-      CompiledIC* cic = CompiledIC_at(iter.reloc());
-      if (!cic->is_call_to_interpreted()) {
-        static_call_addr = iter.addr();
-        cic->set_to_clean();
-      }
-    } else if (iter.type() == relocInfo::static_call_type) {
-      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
-      if (!csc->is_call_to_interpreted()) {
-        static_call_addr = iter.addr();
-        csc->set_to_clean();
-      }
-    }
-    if (static_call_addr != NULL) {
-      RelocIterator sciter(this, low_boundary);
-      while (sciter.next()) {
-        if (sciter.type() == relocInfo::static_stub_type &&
-            sciter.static_stub_reloc()->static_call() == static_call_addr) {
-          sciter.static_stub_reloc()->clear_inline_cache();
-        }
-      }
-    }
-  }
-  // Check that the metadata embedded in the nmethod is alive
-  CheckClass::do_check_class(is_alive, this);
-#endif
-}
-
-
-// Iterate over metadata calling this function.   Used by RedefineClasses
-void nmethod::metadata_do(void f(Metadata*)) {
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-  {
-    // Visit all immediate references that are embedded in the instruction stream.
-    RelocIterator iter(this, low_boundary);
-    while (iter.next()) {
-      if (iter.type() == relocInfo::metadata_type ) {
-        metadata_Relocation* r = iter.metadata_reloc();
-        // In this loop, we must only follow the metadata directly embedded in
-        // the code.  Other metadata (oop_index>0) are seen as part of
-        // the metadata section below.
-        assert(1 == (r->metadata_is_immediate()) +
-               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
-               "metadata must be found in exactly one place");
-        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
-          Metadata* md = r->metadata_value();
-          f(md);
-        }
-      }
-    }
-  }
-
-  // Visit the metadata section
-  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
-    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
-    Metadata* md = *p;
-    f(md);
-  }
-  // Call function Method*, not embedded in these other places.
-  if (_method != NULL) f(_method);
-}
-
-
-// This method is called twice during GC -- once while
-// tracing the "active" nmethods on thread stacks during
-// the (strong) marking phase, and then again when walking
-// the code cache contents during the weak roots processing
-// phase. The two uses are distinguished by means of the
-// 'do_strong_roots_only' flag, which is true in the first
-// case. We want to walk the weak roots in the nmethod
-// only in the second case. The weak roots in the nmethod
-// are the oops in the ExceptionCache and the InlineCache
-// oops.
-void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
-  // make sure the oops are ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-#ifdef GRAAL
-  if (_graal_installed_code != NULL) {
-    f->do_oop((oop*) &_graal_installed_code);
-  }
-#endif
-
-  RelocIterator iter(this, low_boundary);
-
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type ) {
-      oop_Relocation* r = iter.oop_reloc();
-      // In this loop, we must only follow those oops directly embedded in
-      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
-      assert(1 == (r->oop_is_immediate()) +
-                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
-             "oop must be found in exactly one place");
-      if (r->oop_is_immediate() && r->oop_value() != NULL) {
-        f->do_oop(r->oop_addr());
-      }
-    }
-  }
-
-  // Scopes
-  // This includes oop constants not inlined in the code stream.
-  for (oop* p = oops_begin(); p < oops_end(); p++) {
-    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
-    f->do_oop(p);
-  }
-}
-
-#define NMETHOD_SENTINEL ((nmethod*)badAddress)
-
-nmethod* volatile nmethod::_oops_do_mark_nmethods;
-
-// An nmethod is "marked" if its _mark_link is set non-null.
-// Even if it is the end of the linked list, it will have a non-null link value,
-// as long as it is on the list.
-// This code must be MP safe, because it is used from parallel GC passes.
-bool nmethod::test_set_oops_do_mark() {
-  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
-  nmethod* observed_mark_link = _oops_do_mark_link;
-  if (observed_mark_link == NULL) {
-    // Claim this nmethod for this thread to mark.
-    observed_mark_link = (nmethod*)
-      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
-    if (observed_mark_link == NULL) {
-
-      // Atomically append this nmethod (now claimed) to the head of the list:
-      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
-      for (;;) {
-        nmethod* required_mark_nmethods = observed_mark_nmethods;
-        _oops_do_mark_link = required_mark_nmethods;
-        observed_mark_nmethods = (nmethod*)
-          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
-        if (observed_mark_nmethods == required_mark_nmethods)
-          break;
-      }
-      // Mark was clear when we first saw this guy.
-      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
-      return false;
-    }
-  }
-  // On fall through, another racing thread marked this nmethod before we did.
-  return true;
-}
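The claim protocol above is a lock-free stack push with one twist: the link is
first CASed from NULL to a sentinel, so a claimed-but-not-yet-linked nmethod
already satisfies the non-NULL "marked" predicate. A minimal analogue on a
hypothetical node type (illustrative, not VM code):

  struct Node { Node* link; };
  static Node* volatile g_head;
  static Node  g_sentinel;

  bool try_claim_and_push(Node* n) {
    // Claim: link NULL -> sentinel; a losing thread sees non-NULL and backs off.
    if (Atomic::cmpxchg_ptr(&g_sentinel, &n->link, NULL) != NULL) {
      return false;  // already claimed by a racing thread
    }
    // Push: standard CAS loop on the list head, as in the code above.
    Node* observed = g_head;
    for (;;) {
      n->link = observed;
      Node* prev = (Node*) Atomic::cmpxchg_ptr(n, &g_head, observed);
      if (prev == observed) return true;  // push succeeded
      observed = prev;                    // retry against the new head
    }
  }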
-
-void nmethod::oops_do_marking_prologue() {
-  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
-  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
-  // We use cmpxchg_ptr instead of regular assignment here because the user
-  // may fork a bunch of threads, and we need them all to see the same state.
-  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
-  guarantee(observed == NULL, "no races in this sequential code");
-}
-
-void nmethod::oops_do_marking_epilogue() {
-  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
-  nmethod* cur = _oops_do_mark_nmethods;
-  while (cur != NMETHOD_SENTINEL) {
-    assert(cur != NULL, "not NULL-terminated");
-    nmethod* next = cur->_oops_do_mark_link;
-    cur->_oops_do_mark_link = NULL;
-    cur->fix_oop_relocations();
-    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
-    cur = next;
-  }
-  void* required = _oops_do_mark_nmethods;
-  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
-  guarantee(observed == required, "no races in this sequential code");
-  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
-}
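Taken together, prologue, claim, and epilogue bracket one parallel marking
phase. A hedged sketch of the intended sequence; the GC driver shown here is
illustrative, not code from this file:

  nmethod::oops_do_marking_prologue();   // head <- NMETHOD_SENTINEL, marking active
  // ... parallel GC workers traverse roots; each nmethod reached claims itself
  // once via test_set_oops_do_mark() before its oops are processed ...
  nmethod::oops_do_marking_epilogue();   // unlink list, fix_oop_relocations(),
                                         // head <- NULL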
-
-class DetectScavengeRoot: public OopClosure {
-  bool     _detected_scavenge_root;
-public:
-  DetectScavengeRoot() : _detected_scavenge_root(false)
-  { NOT_PRODUCT(_print_nm = NULL); }
-  bool detected_scavenge_root() { return _detected_scavenge_root; }
-  virtual void do_oop(oop* p) {
-    if ((*p) != NULL && (*p)->is_scavengable()) {
-      NOT_PRODUCT(maybe_print(p));
-      _detected_scavenge_root = true;
-    }
-  }
-  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-
-#ifndef PRODUCT
-  nmethod* _print_nm;
-  void maybe_print(oop* p) {
-    if (_print_nm == NULL)  return;
-    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
-                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
-                  (intptr_t)(*p), (intptr_t)p);
-    (*p)->print();
-  }
-#endif //PRODUCT
-};
-
-bool nmethod::detect_scavenge_root_oops() {
-  DetectScavengeRoot detect_scavenge_root;
-  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
-  oops_do(&detect_scavenge_root);
-  return detect_scavenge_root.detected_scavenge_root();
-}
-
-// Method that knows how to preserve outgoing arguments at a call. This method must be
-// called with a frame corresponding to a Java invoke.
-void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
-#ifndef SHARK
-  if (!method()->is_native()) {
-    SimpleScopeDesc ssd(this, fr.pc());
-    Bytecode_invoke call(ssd.method(), ssd.bci());
-    // compiled invokedynamic call sites have an implicit receiver at
-    // resolution time, so make sure it gets GC'ed.
-    bool has_receiver = !call.is_invokestatic();
-    Symbol* signature = call.signature();
-    fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
-  }
-#endif // !SHARK
-}
-
-
-oop nmethod::embeddedOop_at(u_char* p) {
-  RelocIterator iter(this, p, p + 1);
-  while (iter.next())
-    if (iter.type() == relocInfo::oop_type) {
-      return iter.oop_reloc()->oop_value();
-    }
-  return NULL;
-}
-
-
-inline bool includes(void* p, void* from, void* to) {
-  return from <= p && p < to;
-}
-
-
-void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
-  assert(count >= 2, "must be sentinel values, at least");
-
-#ifdef ASSERT
-  // must be sorted and unique; we do a binary search in find_pc_desc()
-  int prev_offset = pcs[0].pc_offset();
-  assert(prev_offset == PcDesc::lower_offset_limit,
-         "must start with a sentinel");
-  for (int i = 1; i < count; i++) {
-    int this_offset = pcs[i].pc_offset();
-    assert(this_offset > prev_offset, "offsets must be sorted");
-    prev_offset = this_offset;
-  }
-  assert(prev_offset == PcDesc::upper_offset_limit,
-         "must end with a sentinel");
-#endif //ASSERT
-
-  // Search for MethodHandle invokes and tag the nmethod.
-  for (int i = 0; i < count; i++) {
-    if (pcs[i].is_method_handle_invoke()) {
-      set_has_method_handle_invokes(true);
-      break;
-    }
-  }
-  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
-
-  int size = count * sizeof(PcDesc);
-  assert(scopes_pcs_size() >= size, "oob");
-  memcpy(scopes_pcs_begin(), pcs, size);
-
-  // Adjust the final sentinel downward.
-  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
-  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
-  last_pc->set_pc_offset(content_size() + 1);
-  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
-    // Fill any rounding gaps with copies of the last record.
-    last_pc[1] = last_pc[0];
-  }
-  // The following assert could fail if sizeof(PcDesc) is not
-  // an integral multiple of oopSize (the rounding term).
-  // If it fails, change the logic to always allocate a multiple
-  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
-  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
-}
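A worked layout makes the sentinel and rounding logic concrete; the offsets
below are hypothetical:

  // count == 4, content_size() == 0x200, one slot of rounding slack:
  //   pcs[0].pc_offset == PcDesc::lower_offset_limit    (leading sentinel)
  //   pcs[1].pc_offset == 0x10, pcs[2].pc_offset == 0x8c (real records, sorted)
  //   pcs[3].pc_offset == 0x201 == content_size() + 1   (sentinel, adjusted down)
  //   pcs[4] == pcs[3]                                  (gap filled with a copy)
  // so the search in find_pc_desc() never reads an uninitialized slot.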
-
-void nmethod::copy_scopes_data(u_char* buffer, int size) {
-  assert(scopes_data_size() >= size, "oob");
-  memcpy(scopes_data_begin(), buffer, size);
-}
-
-
-#ifdef ASSERT
-static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
-  PcDesc* lower = nm->scopes_pcs_begin();
-  PcDesc* upper = nm->scopes_pcs_end();
-  lower += 1; // exclude initial sentinel
-  PcDesc* res = NULL;
-  for (PcDesc* p = lower; p < upper; p++) {
-    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
-    if (match_desc(p, pc_offset, approximate)) {
-      if (res == NULL)
-        res = p;
-      else
-        res = (PcDesc*) badAddress;
-    }
-  }
-  return res;
-}
-#endif
-
-
-// Finds a PcDesc with real-pc equal to "pc"
-PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
-  address base_address = code_begin();
-  if ((pc < base_address) ||
-      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
-    return NULL;  // PC is wildly out of range
-  }
-  int pc_offset = (int) (pc - base_address);
-
-  // Check the PcDesc cache if it contains the desired PcDesc
-  // (This has an almost 100% hit rate.)
-  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
-  if (res != NULL) {
-    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
-    return res;
-  }
-
-  // Fallback algorithm: quasi-linear search for the PcDesc
-  // Find the last pc_offset less than the given offset.
-  // The successor must be the required match, if there is a match at all.
-  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
-  PcDesc* lower = scopes_pcs_begin();
-  PcDesc* upper = scopes_pcs_end();
-  upper -= 1; // exclude final sentinel
-  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
-
-#define assert_LU_OK \
-  /* invariant on lower..upper during the following search: */ \
-  assert(lower->pc_offset() <  pc_offset, "sanity"); \
-  assert(upper->pc_offset() >= pc_offset, "sanity")
-  assert_LU_OK;
-
-  // Use the last successful return as a split point.
-  PcDesc* mid = _pc_desc_cache.last_pc_desc();
-  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
-  if (mid->pc_offset() < pc_offset) {
-    lower = mid;
-  } else {
-    upper = mid;
-  }
-
-  // Take giant steps at first (4096, then 256, then 16, then 1)
-  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
-  const int RADIX = (1 << LOG2_RADIX);
-  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
-    while ((mid = lower + step) < upper) {
-      assert_LU_OK;
-      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
-      if (mid->pc_offset() < pc_offset) {
-        lower = mid;
-      } else {
-        upper = mid;
-        break;
-      }
-    }
-    assert_LU_OK;
-  }
-
-  // Sneak up on the value with a linear search of length ~16.
-  while (true) {
-    assert_LU_OK;
-    mid = lower + 1;
-    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
-    if (mid->pc_offset() < pc_offset) {
-      lower = mid;
-    } else {
-      upper = mid;
-      break;
-    }
-  }
-#undef assert_LU_OK
-
-  if (match_desc(upper, pc_offset, approximate)) {
-    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
-    _pc_desc_cache.add_pc_desc(upper);
-    return upper;
-  } else {
-    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
-    return NULL;
-  }
-}
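The radix schedule is easier to see with numbers. With LOG2_RADIX == 4 (product
builds), step starts at 1 << 12 and shrinks by a factor of 16 per round:

  // 4096 -> 256 -> 16 -> (loop stops once step == 1)
  // Each round leaves (upper - lower) <= step, so after the 16-step round the
  // window holds at most 16 entries and the closing linear sweep needs at most
  // about 16 probes: a few dozen probes total, independent of table size.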
-
-
-bool nmethod::check_all_dependencies() {
-  bool found_check = false;
-  // wholesale check of all dependencies
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.check_dependency() != NULL) {
-      found_check = true;
-      NOT_DEBUG(break);
-    }
-  }
-  return found_check;  // tell caller if we found anything
-}
-
-bool nmethod::check_dependency_on(DepChange& changes) {
-  // What has happened:
-  // 1) a new class dependee has been added
-  // 2) dependee and all its super classes have been marked
-  bool found_check = false;  // set true if we are upset
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    // Evaluate only relevant dependencies.
-    if (deps.spot_check_dependency_at(changes) != NULL) {
-      found_check = true;
-      NOT_DEBUG(break);
-    }
-  }
-  return found_check;
-}
-
-bool nmethod::is_evol_dependent_on(Klass* dependee) {
-  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
-  Array<Method*>* dependee_methods = dependee_ik->methods();
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.type() == Dependencies::evol_method) {
-      Method* method = deps.method_argument(0);
-      for (int j = 0; j < dependee_methods->length(); j++) {
-        if (dependee_methods->at(j) == method) {
-          // RC_TRACE macro has an embedded ResourceMark
-          RC_TRACE(0x01000000,
-            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
-            _method->method_holder()->external_name(),
-            _method->name()->as_C_string(),
-            _method->signature()->as_C_string(), compile_id(),
-            method->method_holder()->external_name(),
-            method->name()->as_C_string(),
-            method->signature()->as_C_string()));
-          if (TraceDependencies || LogCompilation)
-            deps.log_dependency(dependee);
-          return true;
-        }
-      }
-    }
-  }
-  return false;
-}
-
-// Called from mark_for_deoptimization, when dependee is invalidated.
-bool nmethod::is_dependent_on_method(Method* dependee) {
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.type() != Dependencies::evol_method)
-      continue;
-    Method* method = deps.method_argument(0);
-    if (method == dependee) return true;
-  }
-  return false;
-}
-
-
-bool nmethod::is_patchable_at(address instr_addr) {
-  assert(insts_contains(instr_addr), "wrong nmethod used");
-  if (is_zombie()) {
-    // a zombie may never be patched
-    return false;
-  }
-  return true;
-}
-
-
-address nmethod::continuation_for_implicit_exception(address pc) {
-  // Exception happened outside inline-cache check code => we are inside
-  // an active nmethod => use cpc to determine a return address
-  int exception_offset = pc - code_begin();
-  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
-#ifdef ASSERT
-  if (cont_offset == 0) {
-    Thread* thread = ThreadLocalStorage::get_thread_slow();
-    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
-    HandleMark hm(thread);
-    ResourceMark rm(thread);
-    CodeBlob* cb = CodeCache::find_blob(pc);
-    assert(cb != NULL && cb == this, "");
-    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
-    print();
-    method()->print_codes();
-    print_code();
-    print_pcs();
-  }
-#endif
-  if (cont_offset == 0) {
-    // Let the normal error handling report the exception
-    return NULL;
-  }
-  return code_begin() + cont_offset;
-}
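In other words, the nmethod carries a side table from faulting-instruction
offsets to continuation offsets. A usage sketch with made-up offsets:

  ImplicitExceptionTable table(this);
  int cont = table.at(0x48);   // hypothetical fault at code_begin() + 0x48
  // cont == 0    -> no entry: let normal error handling raise the exception
  // cont == 0x90 -> execution resumes at code_begin() + 0x90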
-
-
-
-void nmethod_init() {
-  // make sure you didn't forget to adjust the filler fields
-  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
-}
-
-
-//-------------------------------------------------------------------------------------------
-
-
-// QQQ might we make this work from a frame??
-nmethodLocker::nmethodLocker(address pc) {
-  CodeBlob* cb = CodeCache::find_blob(pc);
-  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
-  _nm = (nmethod*)cb;
-  lock_nmethod(_nm);
-}
-
-// Only JvmtiDeferredEvent::compiled_method_unload_event()
-// should pass zombie_ok == true.
-void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
-  if (nm == NULL)  return;
-  Atomic::inc(&nm->_lock_count);
-  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
-}
-
-void nmethodLocker::unlock_nmethod(nmethod* nm) {
-  if (nm == NULL)  return;
-  Atomic::dec(&nm->_lock_count);
-  guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
-}
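nmethodLocker is thus a plain RAII guard over _lock_count; a minimal usage
sketch, mirroring the use in make_not_entrant_or_zombie above:

  {
    nmethodLocker nml(this);  // Atomic::inc(&_lock_count): flush() is now illegal
    // ... work that may safepoint but must not race with nmethod::flush() ...
  }                           // destructor: Atomic::dec(&_lock_count)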
-
-
-// -----------------------------------------------------------------------------
-// nmethod::get_deopt_original_pc
-//
-// Return the original PC for the given PC if:
-// (a) the given PC belongs to a nmethod and
-// (b) it is a deopt PC
-address nmethod::get_deopt_original_pc(const frame* fr) {
-  if (fr->cb() == NULL)  return NULL;
-
-  nmethod* nm = fr->cb()->as_nmethod_or_null();
-  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
-    return nm->get_original_pc(fr);
-
-  return NULL;
-}
-
-
-// -----------------------------------------------------------------------------
-// MethodHandle
-
-bool nmethod::is_method_handle_return(address return_pc) {
-  if (!has_method_handle_invokes())  return false;
-  PcDesc* pd = pc_desc_at(return_pc);
-  if (pd == NULL)
-    return false;
-  return pd->is_method_handle_invoke();
-}
-
-
-// -----------------------------------------------------------------------------
-// Verification
-
-class VerifyOopsClosure: public OopClosure {
-  nmethod* _nm;
-  bool     _ok;
-public:
-  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
-  bool ok() { return _ok; }
-  virtual void do_oop(oop* p) {
-    if ((*p) == NULL || (*p)->is_oop())  return;
-    if (_ok) {
-      _nm->print_nmethod(true);
-      _ok = false;
-    }
-    tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
-  }
-  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-
-void nmethod::verify() {
-
-  // Hmm. It seems odd that OSR methods can be deopted but not marked
-  // as zombie or not_entrant.
-
-  if( is_zombie() || is_not_entrant() )
-    return;
-
-  // Make sure all the entry points are correctly aligned for patching.
-  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
-
-  // assert(method()->is_oop(), "must be valid");
-
-  ResourceMark rm;
-
-  if (!CodeCache::contains(this)) {
-    fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
-  }
-
-  if(is_native_method() )
-    return;
-
-  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
-  if (nm != this) {
-    fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
-                  this));
-  }
-
-  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
-    if (! p->verify(this)) {
-      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
-    }
-  }
-
-  VerifyOopsClosure voc(this);
-  oops_do(&voc);
-  assert(voc.ok(), "embedded oops must be OK");
-  verify_scavenge_root_oops();
-
-  verify_scopes();
-}
-
-
-void nmethod::verify_interrupt_point(address call_site) {
-  // This code does not work in release mode since
-  // owns_lock is only available in debug mode.
-  CompiledIC* ic = NULL;
-  Thread *cur = Thread::current();
-  if (CompiledIC_lock->owner() == cur ||
-      ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
-       SafepointSynchronize::is_at_safepoint())) {
-    ic = CompiledIC_at(this, call_site);
-    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
-  } else {
-    MutexLocker ml_verify (CompiledIC_lock);
-    ic = CompiledIC_at(this, call_site);
-  }
-  PcDesc* pd = pc_desc_at(ic->end_of_call());
-  assert(pd != NULL, "PcDesc must exist");
-  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
-                                     pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
-                                     pd->return_oop());
-       !sd->is_top(); sd = sd->sender()) {
-    sd->verify();
-  }
-}
-
-void nmethod::verify_scopes() {
-  if( !method() ) return;       // Runtime stubs have no scope
-  if (method()->is_native()) return; // Ignore stub methods.
-  // Iterate through all interrupt points
-  // and verify that the debug information is valid.
-  RelocIterator iter((nmethod*)this);
-  while (iter.next()) {
-    address stub = NULL;
-    switch (iter.type()) {
-      case relocInfo::virtual_call_type:
-        verify_interrupt_point(iter.addr());
-        break;
-      case relocInfo::opt_virtual_call_type:
-        stub = iter.opt_virtual_call_reloc()->static_stub();
-        verify_interrupt_point(iter.addr());
-        break;
-      case relocInfo::static_call_type:
-        stub = iter.static_call_reloc()->static_stub();
-        //verify_interrupt_point(iter.addr());
-        break;
-      case relocInfo::runtime_call_type:
-        address destination = iter.reloc()->value();
-        // Right now there is no way to find out which entries support
-        // an interrupt point.  It would be nice if we had this
-        // information in a table.
-        break;
-    }
-#ifndef GRAAL
-    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
-#endif
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Non-product code
-#ifndef PRODUCT
-
-class DebugScavengeRoot: public OopClosure {
-  nmethod* _nm;
-  bool     _ok;
-public:
-  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
-  bool ok() { return _ok; }
-  virtual void do_oop(oop* p) {
-    if ((*p) == NULL || !(*p)->is_scavengable())  return;
-    if (_ok) {
-      _nm->print_nmethod(true);
-      _ok = false;
-    }
-    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
-    (*p)->print();
-  }
-  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-
-void nmethod::verify_scavenge_root_oops() {
-  if (!on_scavenge_root_list()) {
-    // Actually look inside, to verify the claim that it's clean.
-    DebugScavengeRoot debug_scavenge_root(this);
-    oops_do(&debug_scavenge_root);
-    if (!debug_scavenge_root.ok())
-      fatal("found an unadvertised bad scavengable oop in the code cache");
-  }
-  assert(scavenge_root_not_marked(), "");
-}
-
-#endif // PRODUCT
-
-// Printing operations
-
-void nmethod::print() const {
-  ResourceMark rm;
-  ttyLocker ttyl;   // keep the following output all in one block
-
-  tty->print("Compiled method ");
-
-  if (is_compiled_by_c1()) {
-    tty->print("(c1) ");
-  } else if (is_compiled_by_c2()) {
-    tty->print("(c2) ");
-  } else if (is_compiled_by_shark()) {
-    tty->print("(shark) ");
-  } else {
-    tty->print("(nm) ");
-  }
-
-  print_on(tty, NULL);
-
-  if (WizardMode) {
-    tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
-    tty->print(" for method " INTPTR_FORMAT , (address)method());
-    tty->print(" { ");
-    if (is_in_use())      tty->print("in_use ");
-    if (is_not_entrant()) tty->print("not_entrant ");
-    if (is_zombie())      tty->print("zombie ");
-    if (is_unloaded())    tty->print("unloaded ");
-    if (on_scavenge_root_list())  tty->print("scavenge_root ");
-    tty->print_cr("}:");
-  }
-  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              (address)this,
-                                              (address)this + size(),
-                                              size());
-  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              relocation_begin(),
-                                              relocation_end(),
-                                              relocation_size());
-  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              consts_begin(),
-                                              consts_end(),
-                                              consts_size());
-  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              insts_begin(),
-                                              insts_end(),
-                                              insts_size());
-  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              stub_begin(),
-                                              stub_end(),
-                                              stub_size());
-  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              oops_begin(),
-                                              oops_end(),
-                                              oops_size());
-  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              metadata_begin(),
-                                              metadata_end(),
-                                              metadata_size());
-  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              scopes_data_begin(),
-                                              scopes_data_end(),
-                                              scopes_data_size());
-  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              scopes_pcs_begin(),
-                                              scopes_pcs_end(),
-                                              scopes_pcs_size());
-  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              dependencies_begin(),
-                                              dependencies_end(),
-                                              dependencies_size());
-  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              handler_table_begin(),
-                                              handler_table_end(),
-                                              handler_table_size());
-  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              nul_chk_table_begin(),
-                                              nul_chk_table_end(),
-                                              nul_chk_table_size());
-}
-
-void nmethod::print_code() {
-  HandleMark hm;
-  ResourceMark m;
-  Disassembler::decode(this);
-}
-
-
-#ifndef PRODUCT
-
-void nmethod::print_scopes() {
-  // Find the first pc desc for all scopes in the code and print it.
-  ResourceMark rm;
-  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
-    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
-      continue;
-
-    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
-    sd->print_on(tty, p);
-  }
-}
-
-void nmethod::print_dependencies() {
-  ResourceMark rm;
-  ttyLocker ttyl;   // keep the following output all in one block
-  tty->print_cr("Dependencies:");
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    deps.print_dependency();
-    Klass* ctxk = deps.context_type();
-    if (ctxk != NULL) {
-      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
-        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
-      }
-    }
-    deps.log_dependency();  // put it into the xml log also
-  }
-}
-
-
-void nmethod::print_relocations() {
-  ResourceMark m;       // in case methods get printed via the debugger
-  tty->print_cr("relocations:");
-  RelocIterator iter(this);
-  iter.print();
-  if (UseRelocIndex) {
-    jint* index_end   = (jint*)relocation_end() - 1;
-    jint  index_size  = *index_end;
-    jint* index_start = (jint*)( (address)index_end - index_size );
-    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
-    if (index_size > 0) {
-      jint* ip;
-      for (ip = index_start; ip+2 <= index_end; ip += 2)
-        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
-                      ip[0],
-                      ip[1],
-                      header_end()+ip[0],
-                      relocation_begin()-1+ip[1]);
-      for (; ip < index_end; ip++)
-        tty->print_cr("  (%d ?)", ip[0]);
-      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
-      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
-    }
-  }
-}
-
-
-void nmethod::print_pcs() {
-  ResourceMark m;       // in case methods get printed via debugger
-  tty->print_cr("pc-bytecode offsets:");
-  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
-    p->print(this);
-  }
-}
-
-#endif // PRODUCT
-
-const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
-  RelocIterator iter(this, begin, end);
-  bool have_one = false;
-  while (iter.next()) {
-    have_one = true;
-    switch (iter.type()) {
-        case relocInfo::none:                  return "no_reloc";
-        case relocInfo::oop_type: {
-          stringStream st;
-          oop_Relocation* r = iter.oop_reloc();
-          oop obj = r->oop_value();
-          st.print("oop(");
-          if (obj == NULL) st.print("NULL");
-          else obj->print_value_on(&st);
-          st.print(")");
-          return st.as_string();
-        }
-        case relocInfo::metadata_type: {
-          stringStream st;
-          metadata_Relocation* r = iter.metadata_reloc();
-          Metadata* obj = r->metadata_value();
-          st.print("metadata(");
-          if (obj == NULL) st.print("NULL");
-          else obj->print_value_on(&st);
-          st.print(")");
-          return st.as_string();
-        }
-        case relocInfo::virtual_call_type:     return "virtual_call";
-        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
-        case relocInfo::static_call_type:      return "static_call";
-        case relocInfo::static_stub_type:      return "static_stub";
-        case relocInfo::runtime_call_type:     return "runtime_call";
-        case relocInfo::external_word_type:    return "external_word";
-        case relocInfo::internal_word_type:    return "internal_word";
-        case relocInfo::section_word_type:     return "section_word";
-        case relocInfo::poll_type:             return "poll";
-        case relocInfo::poll_return_type:      return "poll_return";
-        case relocInfo::type_mask:             return "type_bit_mask";
-    }
-  }
-  return have_one ? "other" : NULL;
-}
-
-// Return the last scope in (begin..end]
-ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
-  PcDesc* p = pc_desc_near(begin+1);
-  if (p != NULL && p->real_pc(this) <= end) {
-    return new ScopeDesc(this, p->scope_decode_offset(),
-                         p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(),
-                         p->return_oop());
-  }
-  return NULL;
-}
-
-void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
-  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
-  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
-  if (GRAAL_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
-  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
-  if (GRAAL_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
-
-  if (has_method_handle_invokes())
-    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
-
-  if (block_begin == consts_begin())            stream->print_cr("[Constants]");
-
-  if (block_begin == entry_point()) {
-    methodHandle m = method();
-    if (m.not_null()) {
-      stream->print("  # ");
-      m->print_value_on(stream);
-      stream->cr();
-    }
-    if (m.not_null() && !is_osr_method()) {
-      ResourceMark rm;
-      int sizeargs = m->size_of_parameters();
-      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
-      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
-      {
-        int sig_index = 0;
-        if (!m->is_static())
-          sig_bt[sig_index++] = T_OBJECT; // 'this'
-        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
-          BasicType t = ss.type();
-          sig_bt[sig_index++] = t;
-          if (type2size[t] == 2) {
-            sig_bt[sig_index++] = T_VOID;
-          } else {
-            assert(type2size[t] == 1, "size is 1 or 2");
-          }
-        }
-        assert(sig_index == sizeargs, "");
-      }
-      const char* spname = "sp"; // make arch-specific?
-      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
-      int stack_slot_offset = this->frame_size() * wordSize;
-      int tab1 = 14, tab2 = 24;
-      int sig_index = 0;
-      int arg_index = (m->is_static() ? 0 : -1);
-      bool did_old_sp = false;
-      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
-        bool at_this = (arg_index == -1);
-        bool at_old_sp = false;
-        BasicType t = (at_this ? T_OBJECT : ss.type());
-        assert(t == sig_bt[sig_index], "sigs in sync");
-        if (at_this)
-          stream->print("  # this: ");
-        else
-          stream->print("  # parm%d: ", arg_index);
-        stream->move_to(tab1);
-        VMReg fst = regs[sig_index].first();
-        VMReg snd = regs[sig_index].second();
-        if (fst->is_reg()) {
-          stream->print("%s", fst->name());
-          if (snd->is_valid())  {
-            stream->print(":%s", snd->name());
-          }
-        } else if (fst->is_stack()) {
-          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
-          if (offset == stack_slot_offset)  at_old_sp = true;
-          stream->print("[%s+0x%x]", spname, offset);
-        } else {
-          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
-        }
-        stream->print(" ");
-        stream->move_to(tab2);
-        stream->print("= ");
-        if (at_this) {
-          m->method_holder()->print_value_on(stream);
-        } else {
-          bool did_name = false;
-          if (!at_this && ss.is_object()) {
-            Symbol* name = ss.as_symbol_or_null();
-            if (name != NULL) {
-              name->print_value_on(stream);
-              did_name = true;
-            }
-          }
-          if (!did_name)
-            stream->print("%s", type2name(t));
-        }
-        if (at_old_sp) {
-          stream->print("  (%s of caller)", spname);
-          did_old_sp = true;
-        }
-        stream->cr();
-        sig_index += type2size[t];
-        arg_index += 1;
-        if (!at_this)  ss.next();
-      }
-      if (!did_old_sp) {
-        stream->print("  # ");
-        stream->move_to(tab1);
-        stream->print("[%s+0x%x]", spname, stack_slot_offset);
-        stream->print("  (%s of caller)", spname);
-        stream->cr();
-      }
-    }
-  }
-}
-
-void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
-  // First, find an oopmap in (begin, end].
-  // We use the odd half-closed interval so that oop maps and scope descs
-  // which are tied to the byte after a call are printed with the call itself.
-  address base = code_begin();
-  OopMapSet* oms = oop_maps();
-  if (oms != NULL) {
-    for (int i = 0, imax = oms->size(); i < imax; i++) {
-      OopMap* om = oms->at(i);
-      address pc = base + om->offset();
-      if (pc > begin) {
-        if (pc <= end) {
-          st->move_to(column);
-          st->print("; ");
-          om->print_on(st);
-        }
-        break;
-      }
-    }
-  }
-
-  // Print any debug info present at this pc.
-  ScopeDesc* sd  = scope_desc_in(begin, end);
-  if (sd != NULL) {
-    st->move_to(column);
-    if (sd->bci() == SynchronizationEntryBCI) {
-      st->print(";*synchronization entry");
-    } else {
-      if (sd->method() == NULL) {
-        st->print("method is NULL");
-      } else if (sd->method()->is_native()) {
-        st->print("method is native");
-      } else {
-        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
-        st->print(";*%s", Bytecodes::name(bc));
-        switch (bc) {
-        case Bytecodes::_invokevirtual:
-        case Bytecodes::_invokespecial:
-        case Bytecodes::_invokestatic:
-        case Bytecodes::_invokeinterface:
-          {
-            Bytecode_invoke invoke(sd->method(), sd->bci());
-            st->print(" ");
-            if (invoke.name() != NULL)
-              invoke.name()->print_symbol_on(st);
-            else
-              st->print("<UNKNOWN>");
-            break;
-          }
-        case Bytecodes::_getfield:
-        case Bytecodes::_putfield:
-        case Bytecodes::_getstatic:
-        case Bytecodes::_putstatic:
-          {
-            Bytecode_field field(sd->method(), sd->bci());
-            st->print(" ");
-            if (field.name() != NULL)
-              field.name()->print_symbol_on(st);
-            else
-              st->print("<UNKNOWN>");
-          }
-        }
-      }
-    }
-
-    // Print all scopes
-    for (;sd != NULL; sd = sd->sender()) {
-      st->move_to(column);
-      st->print("; -");
-      if (sd->method() == NULL) {
-        st->print("method is NULL");
-        st->print("@%d", sd->bci());
-      } else {
-        sd->method()->print_short_name(st);
-        int lineno = sd->method()->line_number_from_bci(sd->bci());
-        if (lineno != -1) {
-          st->print("@%d (line %d)", sd->bci(), lineno);
-        } else {
-          st->print("@%d", sd->bci());
-        }
-      }
-      st->cr();
-    }
-  }
-
-  // Print relocation information
-  const char* str = reloc_string_for(begin, end);
-  if (str != NULL) {
-    if (sd != NULL) st->cr();
-    st->move_to(column);
-    st->print(";   {%s}", str);
-  }
-  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
-  if (cont_offset != 0) {
-    st->move_to(column);
-    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
-  }
-
-}
-
-#ifndef PRODUCT
-
-void nmethod::print_value_on(outputStream* st) const {
-  st->print("nmethod");
-  print_on(st, NULL);
-}
-
-void nmethod::print_calls(outputStream* st) {
-  RelocIterator iter(this);
-  while (iter.next()) {
-    switch (iter.type()) {
-    case relocInfo::virtual_call_type:
-    case relocInfo::opt_virtual_call_type: {
-      VerifyMutexLocker mc(CompiledIC_lock);
-      CompiledIC_at(iter.reloc())->print();
-      break;
-    }
-    case relocInfo::static_call_type:
-      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
-      compiledStaticCall_at(iter.reloc())->print();
-      break;
-    }
-  }
-}
-
-void nmethod::print_handler_table() {
-  ExceptionHandlerTable(this).print();
-}
-
-void nmethod::print_nul_chk_table() {
-  ImplicitExceptionTable(this).print(code_begin());
-}
-
-#endif // PRODUCT
-
-void nmethod::print_statistics() {
-  ttyLocker ttyl;
-  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
-  nmethod_stats.print_native_nmethod_stats();
-  nmethod_stats.print_nmethod_stats();
-  DebugInformationRecorder::print_statistics();
-  nmethod_stats.print_pc_stats();
-  Dependencies::print_statistics();
-  if (xtty != NULL)  xtty->tail("statistics");
-}
-
-#ifdef GRAAL
-void DebugScopedNMethod::print_on(outputStream* st) {
-  if (_nm != NULL) {
-    st->print("nmethod@%p", _nm);
-    Method* method = _nm->method();
-    if (method != NULL) {
-      char holder[O_BUFLEN];
-      char nameAndSig[O_BUFLEN];
-      method->method_holder()->name()->as_C_string(holder, O_BUFLEN);
-      method->name_and_sig_as_C_string(nameAndSig, O_BUFLEN);
-      st->print(" - %s::%s", holder, nameAndSig);
-    }
-  }
-}
-#endif
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeCache.hpp"
+#include "code/compiledIC.hpp"
+#include "code/dependencies.hpp"
+#include "code/nmethod.hpp"
+#include "code/scopeDesc.hpp"
+#include "compiler/abstractCompiler.hpp"
+#include "compiler/compileBroker.hpp"
+#include "compiler/compileLog.hpp"
+#include "compiler/compilerOracle.hpp"
+#include "compiler/disassembler.hpp"
+#include "interpreter/bytecode.hpp"
+#include "oops/methodData.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
+#include "prims/jvmtiImpl.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/sweeper.hpp"
+#include "utilities/dtrace.hpp"
+#include "utilities/events.hpp"
+#include "utilities/xmlstream.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/machineCodePrinter.hpp"
+#ifdef SHARK
+#include "shark/sharkCompiler.hpp"
+#endif
+#ifdef GRAAL
+#include "graal/graalJavaAccess.hpp"
+#endif
+
+#ifdef DTRACE_ENABLED
+
+// Only bother with this argument setup if dtrace is available
+
+#ifndef USDT2
+HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
+  const char*, int, const char*, int, const char*, int, void*, size_t);
+
+HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
+  char*, int, char*, int, char*, int);
+
+#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
+  {                                                                       \
+    Method* m = (method);                                                 \
+    if (m != NULL) {                                                      \
+      Symbol* klass_name = m->klass_name();                               \
+      Symbol* name = m->name();                                           \
+      Symbol* signature = m->signature();                                 \
+      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
+        klass_name->bytes(), klass_name->utf8_length(),                   \
+        name->bytes(), name->utf8_length(),                               \
+        signature->bytes(), signature->utf8_length());                    \
+    }                                                                     \
+  }
+#else /* USDT2 */
+#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
+  {                                                                       \
+    Method* m = (method);                                                 \
+    if (m != NULL) {                                                      \
+      Symbol* klass_name = m->klass_name();                               \
+      Symbol* name = m->name();                                           \
+      Symbol* signature = m->signature();                                 \
+      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
+        (char *) klass_name->bytes(), klass_name->utf8_length(),         \
+        (char *) name->bytes(), name->utf8_length(),                     \
+        (char *) signature->bytes(), signature->utf8_length());          \
+    }                                                                     \
+  }
+#endif /* USDT2 */
+
+#else //  ndef DTRACE_ENABLED
+
+#define DTRACE_METHOD_UNLOAD_PROBE(method)
+
+#endif
+
+bool nmethod::is_compiled_by_c1() const {
+  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
+  if (is_native_method()) return false;
+  return compiler()->is_c1();
+}
+bool nmethod::is_compiled_by_c2() const {
+  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
+  if (is_native_method()) return false;
+  return compiler()->is_c2();
+}
+bool nmethod::is_compiled_by_shark() const {
+  if (is_native_method()) return false;
+  assert(compiler() != NULL, "must be");
+  return compiler()->is_shark();
+}
+
+
+
+//---------------------------------------------------------------------------------
+// NMethod statistics
+// They are printed under various flags, including:
+//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
+// (In the latter two cases, they, like other stats, are printed to the log only.)
+
+// These variables are put into one block to reduce relocations
+// and make it simpler to print from the debugger.
+static
+struct nmethod_stats_struct {
+  int nmethod_count;
+  int total_size;
+  int relocation_size;
+  int consts_size;
+  int insts_size;
+  int stub_size;
+  int scopes_data_size;
+  int scopes_pcs_size;
+  int dependencies_size;
+  int handler_table_size;
+  int nul_chk_table_size;
+  int oops_size;
+
+  void note_nmethod(nmethod* nm) {
+    nmethod_count += 1;
+    total_size          += nm->size();
+    relocation_size     += nm->relocation_size();
+    consts_size         += nm->consts_size();
+    insts_size          += nm->insts_size();
+    stub_size           += nm->stub_size();
+    oops_size           += nm->oops_size();
+    scopes_data_size    += nm->scopes_data_size();
+    scopes_pcs_size     += nm->scopes_pcs_size();
+    dependencies_size   += nm->dependencies_size();
+    handler_table_size  += nm->handler_table_size();
+    nul_chk_table_size  += nm->nul_chk_table_size();
+  }
+  void print_nmethod_stats() {
+    if (nmethod_count == 0)  return;
+    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
+    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
+    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
+    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
+    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
+    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
+    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
+    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
+    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
+    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
+    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
+    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
+  }
+
+  int native_nmethod_count;
+  int native_total_size;
+  int native_relocation_size;
+  int native_insts_size;
+  int native_oops_size;
+  void note_native_nmethod(nmethod* nm) {
+    native_nmethod_count += 1;
+    native_total_size       += nm->size();
+    native_relocation_size  += nm->relocation_size();
+    native_insts_size       += nm->insts_size();
+    native_oops_size        += nm->oops_size();
+  }
+  void print_native_nmethod_stats() {
+    if (native_nmethod_count == 0)  return;
+    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
+    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
+    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
+    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
+    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
+  }
+
+  int pc_desc_resets;   // number of resets (= number of caches)
+  int pc_desc_queries;  // queries to nmethod::find_pc_desc
+  int pc_desc_approx;   // number of queries with approximate == true
+  int pc_desc_repeats;  // number of _pc_descs[0] hits
+  int pc_desc_hits;     // number of LRU cache hits
+  int pc_desc_tests;    // total number of PcDesc examinations
+  int pc_desc_searches; // total number of quasi-binary search steps
+  int pc_desc_adds;     // number of LRU cache insertions
+
+  void print_pc_stats() {
+    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
+                  pc_desc_queries,
+                  (double)(pc_desc_tests + pc_desc_searches)
+                  / pc_desc_queries);
+    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
+                  pc_desc_resets,
+                  pc_desc_queries, pc_desc_approx,
+                  pc_desc_repeats, pc_desc_hits,
+                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
+  }
+} nmethod_stats;
+
+
+//---------------------------------------------------------------------------------
+
+
+ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
+  assert(pc != NULL, "Must be non null");
+  assert(exception.not_null(), "Must be non null");
+  assert(handler != NULL, "Must be non null");
+
+  _count = 0;
+  _exception_type = exception->klass();
+  _next = NULL;
+
+  add_address_and_handler(pc,handler);
+}
+
+
+address ExceptionCache::match(Handle exception, address pc) {
+  assert(pc != NULL,"Must be non null");
+  assert(exception.not_null(),"Must be non null");
+  if (exception->klass() == exception_type()) {
+    return (test_address(pc));
+  }
+
+  return NULL;
+}
+
+
+bool ExceptionCache::match_exception_with_space(Handle exception) {
+  assert(exception.not_null(),"Must be non null");
+  if (exception->klass() == exception_type() && count() < cache_size) {
+    return true;
+  }
+  return false;
+}
+
+
+address ExceptionCache::test_address(address addr) {
+  for (int i=0; i<count(); i++) {
+    if (pc_at(i) == addr) {
+      return handler_at(i);
+    }
+  }
+  return NULL;
+}
+
+
+bool ExceptionCache::add_address_and_handler(address addr, address handler) {
+  if (test_address(addr) == handler) return true;
+  if (count() < cache_size) {
+    set_pc_at(count(),addr);
+    set_handler_at(count(), handler);
+    increment_count();
+    return true;
+  }
+  return false;
+}
+
+
+// Private method used to manipulate the exception cache
+// directly.
+ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
+  ExceptionCache* ec = exception_cache();
+  while (ec != NULL) {
+    if (ec->match_exception_with_space(exception)) {
+      return ec;
+    }
+    ec = ec->next();
+  }
+  return NULL;
+}
+
+
+//-----------------------------------------------------------------------------
+
+
+// Helper used by both find_pc_desc methods.
+static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
+  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
+  if (!approximate)
+    return pc->pc_offset() == pc_offset;
+  else
+    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
+}
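+
+// Illustrative sketch (hypothetical offsets, not part of the code): given
+// PcDescs sorted by offset, say { -1, 4, 12, 20 } with -1 being the sentinel
+// that reset_to() below relies on, an exact query for pc_offset 10 matches
+// nothing, while an approximate query matches the desc at offset 12:
+//
+//   match_desc(desc_at_12, 10, false)  // false: 10 != 12
+//   match_desc(desc_at_12, 10, true)   // true:  4 < 10 && 10 <= 12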
+
+void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
+  if (initial_pc_desc == NULL) {
+    _pc_descs[0] = NULL; // native method; no PcDescs at all
+    return;
+  }
+  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
+  // reset the cache by filling it with benign (non-null) values
+  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
+  for (int i = 0; i < cache_size; i++)
+    _pc_descs[i] = initial_pc_desc;
+}
+
+PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
+  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
+  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
+
+  // Note: one might think that caching the most recently
+  // read value separately would be a win, but one would be
+  // wrong.  When many threads are updating it, the cache
+  // line it's in would bounce between caches, negating
+  // any benefit.
+
+  // In order to prevent race conditions do not load cache elements
+  // repeatedly, but use a local copy:
+  PcDesc* res;
+
+  // Step one:  Check the most recently added value.
+  res = _pc_descs[0];
+  if (res == NULL) return NULL;  // native method; no PcDescs at all
+  if (match_desc(res, pc_offset, approximate)) {
+    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
+    return res;
+  }
+
+  // Step two:  Check the rest of the LRU cache.
+  for (int i = 1; i < cache_size; ++i) {
+    res = _pc_descs[i];
+    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
+    if (match_desc(res, pc_offset, approximate)) {
+      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
+      return res;
+    }
+  }
+
+  // Report failure.
+  return NULL;
+}
+
+void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
+  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
+  // Update the LRU cache by shifting pc_desc forward.
+  for (int i = 0; i < cache_size; i++)  {
+    PcDesc* next = _pc_descs[i];
+    _pc_descs[i] = pc_desc;
+    pc_desc = next;
+  }
+}
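+
+// Illustrative sketch (hypothetical contents): with cache_size == 4, adding
+// D to a cache holding [A, B, C, S] shifts each entry down one slot, giving
+// [D, A, B, C] and dropping the oldest entry. _pc_descs[0] thus always holds
+// the most recent addition, which is why find_pc_desc() checks it first.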
+
+// adjust pcs_size so that it is a multiple of both oopSize and
+// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
+// of oopSize, then 2*sizeof(PcDesc) is)
+static int adjust_pcs_size(int pcs_size) {
+  int nsize = round_to(pcs_size,   oopSize);
+  if ((nsize % sizeof(PcDesc)) != 0) {
+    nsize = pcs_size + sizeof(PcDesc);
+  }
+  assert((nsize % oopSize) == 0, "correct alignment");
+  return nsize;
+}
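+
+// Worked example (hypothetical sizes): if oopSize == 8 and sizeof(PcDesc)
+// == 12, then pcs_size == 36 rounds up to 40, which is not a multiple of 12,
+// so nsize becomes 36 + 12 == 48 -- a multiple of both 8 and 12. This is
+// where the assumption that 2 * sizeof(PcDesc) is a multiple of oopSize
+// comes in.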
+
+//-----------------------------------------------------------------------------
+
+
+void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
+  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
+  assert(new_entry != NULL,"Must be non null");
+  assert(new_entry->next() == NULL, "Must be null");
+
+  if (exception_cache() != NULL) {
+    new_entry->set_next(exception_cache());
+  }
+  set_exception_cache(new_entry);
+}
+
+void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
+  ExceptionCache* prev = NULL;
+  ExceptionCache* curr = exception_cache();
+  assert(curr != NULL, "nothing to remove");
+  // find the previous and next entry of ec
+  while (curr != ec) {
+    prev = curr;
+    curr = curr->next();
+    assert(curr != NULL, "ExceptionCache not found");
+  }
+  // now: curr == ec
+  ExceptionCache* next = curr->next();
+  if (prev == NULL) {
+    set_exception_cache(next);
+  } else {
+    prev->set_next(next);
+  }
+  delete curr;
+}
+
+
+// Public methods for accessing the exception cache.
+address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
+  // We never grab a lock to read the exception cache, so we may
+  // have false negatives. This is okay, as it can only happen during
+  // the first few exception lookups for a given nmethod.
+  ExceptionCache* ec = exception_cache();
+  while (ec != NULL) {
+    address ret_val;
+    if ((ret_val = ec->match(exception,pc)) != NULL) {
+      return ret_val;
+    }
+    ec = ec->next();
+  }
+  return NULL;
+}
+
+
+void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
+  // There are potential race conditions during exception cache updates, so we
+  // must own the ExceptionCache_lock before doing ANY modifications. Because
+  // we don't lock during reads, it is possible to have several threads attempt
+  // to update the cache with the same data. We need to check for already inserted
+  // copies of the current data before adding it.
+
+  MutexLocker ml(ExceptionCache_lock);
+  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
+
+  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
+    target_entry = new ExceptionCache(exception,pc,handler);
+    add_exception_cache_entry(target_entry);
+  }
+}
+
+
+//-------------end of code for ExceptionCache--------------
+
+
+int nmethod::total_size() const {
+  return
+    consts_size()        +
+    insts_size()         +
+    stub_size()          +
+    scopes_data_size()   +
+    scopes_pcs_size()    +
+    handler_table_size() +
+    nul_chk_table_size();
+}
+
+const char* nmethod::compile_kind() const {
+  if (is_osr_method())     return "osr";
+  if (method() != NULL && is_native_method())  return "c2n";
+  return NULL;
+}
+
+// Fill in default values for various flag fields
+void nmethod::init_defaults() {
+  _state                      = alive;
+  _marked_for_reclamation     = 0;
+  _has_flushed_dependencies   = 0;
+  _speculatively_disconnected = 0;
+  _has_unsafe_access          = 0;
+  _has_method_handle_invokes  = 0;
+  _lazy_critical_native       = 0;
+  _has_wide_vectors           = 0;
+  _marked_for_deoptimization  = 0;
+  _lock_count                 = 0;
+  _stack_traversal_mark       = 0;
+  _unload_reported            = false;           // jvmti state
+
+#ifdef ASSERT
+  _oops_are_stale             = false;
+#endif
+
+  _oops_do_mark_link       = NULL;
+  _jmethod_id              = NULL;
+  _osr_link                = NULL;
+  _scavenge_root_link      = NULL;
+  _scavenge_root_state     = 0;
+  _saved_nmethod_link      = NULL;
+  _compiler                = NULL;
+#ifdef GRAAL
+  _graal_installed_code   = NULL;
+#endif
+#ifdef HAVE_DTRACE_H
+  _trap_offset             = 0;
+#endif // def HAVE_DTRACE_H
+}
+
+
+nmethod* nmethod::new_native_nmethod(methodHandle method,
+  int compile_id,
+  CodeBuffer *code_buffer,
+  int vep_offset,
+  int frame_complete,
+  int frame_size,
+  ByteSize basic_lock_owner_sp_offset,
+  ByteSize basic_lock_sp_offset,
+  OopMapSet* oop_maps) {
+  code_buffer->finalize_oop_references(method);
+  // create nmethod
+  nmethod* nm = NULL;
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
+    CodeOffsets offsets;
+    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+    nm = new (native_nmethod_size)
+      nmethod(method(), native_nmethod_size, compile_id, &offsets,
+              code_buffer, frame_size,
+              basic_lock_owner_sp_offset, basic_lock_sp_offset,
+              oop_maps);
+    if (nm != NULL)  nmethod_stats.note_native_nmethod(nm);
+    if (PrintAssembly && nm != NULL)
+      Disassembler::decode(nm);
+
+    if (PrintMachineCodeToFile) {
+      MachineCodePrinter::print(nm);
+    }
+  }
+  // verify nmethod
+  debug_only(if (nm) nm->verify();) // might block
+
+  if (nm != NULL) {
+    nm->log_new_nmethod();
+  }
+
+  return nm;
+}
+
+#ifdef HAVE_DTRACE_H
+nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
+                                     CodeBuffer *code_buffer,
+                                     int vep_offset,
+                                     int trap_offset,
+                                     int frame_complete,
+                                     int frame_size) {
+  code_buffer->finalize_oop_references(method);
+  // create nmethod
+  nmethod* nm = NULL;
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
+    CodeOffsets offsets;
+    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
+    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+
+    nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size);
+
+    if (nm != NULL)  nmethod_stats.note_nmethod(nm);
+    if (PrintAssembly && nm != NULL)
+      Disassembler::decode(nm);
+  }
+  // verify nmethod
+  debug_only(if (nm) nm->verify();) // might block
+
+  if (nm != NULL) {
+    nm->log_new_nmethod();
+  }
+
+  return nm;
+}
+
+#endif // def HAVE_DTRACE_H
+
+nmethod* nmethod::new_nmethod(methodHandle method,
+  int compile_id,
+  int entry_bci,
+  CodeOffsets* offsets,
+  int orig_pc_offset,
+  DebugInformationRecorder* debug_info,
+  Dependencies* dependencies,
+  CodeBuffer* code_buffer, int frame_size,
+  OopMapSet* oop_maps,
+  ExceptionHandlerTable* handler_table,
+  ImplicitExceptionTable* nul_chk_table,
+  AbstractCompiler* compiler,
+  int comp_level
+#ifdef GRAAL
+  , Handle installed_code
+#endif
+)
+{
+  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
+  code_buffer->finalize_oop_references(method);
+  // create nmethod
+  nmethod* nm = NULL;
+  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    int nmethod_size =
+      allocation_size(code_buffer, sizeof(nmethod))
+      + adjust_pcs_size(debug_info->pcs_size())
+      + round_to(dependencies->size_in_bytes() , oopSize)
+      + round_to(handler_table->size_in_bytes(), oopSize)
+      + round_to(nul_chk_table->size_in_bytes(), oopSize)
+      + round_to(debug_info->data_size()       , oopSize);
+    nm = new (nmethod_size)
+      nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
+              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
+              oop_maps,
+              handler_table,
+              nul_chk_table,
+              compiler,
+              comp_level
+#ifdef GRAAL
+              , installed_code
+#endif
+              );
+    if (nm != NULL) {
+      // To make dependency checking during class loading fast, record
+      // the nmethod dependencies in the classes it is dependent on.
+      // This allows the dependency checking code to simply walk the
+      // class hierarchy above the loaded class, checking only nmethods
+      // which are dependent on those classes.  The slow way is to
+      // check every nmethod for dependencies which makes it linear in
+      // the number of methods compiled.  For applications with a lot of
+      // classes the slow way is too slow.
+      for (Dependencies::DepStream deps(nm); deps.next(); ) {
+        Klass* klass = deps.context_type();
+        if (klass == NULL)  continue;  // ignore things like evol_method
+
+        // record this nmethod as dependent on this klass
+        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
+      }
+    }
+    if (nm != NULL)  nmethod_stats.note_nmethod(nm);
+    if (PrintAssembly && nm != NULL)
+      Disassembler::decode(nm);
+
+    if (PrintMachineCodeToFile) {
+      MachineCodePrinter::print(nm);
+    }
+  }
+
+  // verify nmethod
+  debug_only(if (nm) nm->verify();) // might block
+
+  if (nm != NULL) {
+    nm->log_new_nmethod();
+  }
+
+  // done
+  return nm;
+}
+
+
+// For native wrappers
+nmethod::nmethod(
+  Method* method,
+  int nmethod_size,
+  int compile_id,
+  CodeOffsets* offsets,
+  CodeBuffer* code_buffer,
+  int frame_size,
+  ByteSize basic_lock_owner_sp_offset,
+  ByteSize basic_lock_sp_offset,
+  OopMapSet* oop_maps )
+  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
+             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
+  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
+  _native_basic_lock_sp_offset(basic_lock_sp_offset)
+{
+  {
+    debug_only(No_Safepoint_Verifier nsv;)
+    assert_locked_or_safepoint(CodeCache_lock);
+
+    init_defaults();
+    _method                  = method;
+    _entry_bci               = InvocationEntryBci;
+    // We have no exception handler or deopt handler, so make the
+    // values something that will never match a pc, like the nmethod vtable entry.
+    _exception_offset        = 0;
+    _deoptimize_offset       = 0;
+    _deoptimize_mh_offset    = 0;
+    _orig_pc_offset          = 0;
+
+    _consts_offset           = data_offset();
+    _stub_offset             = data_offset();
+    _oops_offset             = data_offset();
+    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
+    _scopes_pcs_offset       = _scopes_data_offset;
+    _dependencies_offset     = _scopes_pcs_offset;
+    _handler_table_offset    = _dependencies_offset;
+    _nul_chk_table_offset    = _handler_table_offset;
+    _nmethod_end_offset      = _nul_chk_table_offset;
+    _compile_id              = compile_id;
+    _comp_level              = CompLevel_none;
+    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
+    _osr_entry_point         = NULL;
+    _exception_cache         = NULL;
+    _pc_desc_cache.reset_to(NULL);
+
+    code_buffer->copy_values_to(this);
+    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
+      CodeCache::add_scavenge_root_nmethod(this);
+    }
+    debug_only(verify_scavenge_root_oops());
+    CodeCache::commit(this);
+  }
+
+  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
+    ttyLocker ttyl;  // keep the following output all in one block
+    // This output goes directly to the tty, not the compiler log.
+    // To enable tools to match it up with the compilation activity,
+    // be sure to tag this tty output with the compile ID.
+    if (xtty != NULL) {
+      xtty->begin_head("print_native_nmethod");
+      xtty->method(_method);
+      xtty->stamp();
+      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+    }
+    // print the header part first
+    print();
+    // then print the requested information
+    if (PrintNativeNMethods) {
+      print_code();
+      if (oop_maps != NULL) {
+        oop_maps->print();
+      }
+    }
+    if (PrintRelocations) {
+      print_relocations();
+    }
+    if (xtty != NULL) {
+      xtty->tail("print_native_nmethod");
+    }
+  }
+}
+
+// For dtrace wrappers
+#ifdef HAVE_DTRACE_H
+nmethod::nmethod(
+  Method* method,
+  int nmethod_size,
+  CodeOffsets* offsets,
+  CodeBuffer* code_buffer,
+  int frame_size)
+  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
+             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
+  _native_receiver_sp_offset(in_ByteSize(-1)),
+  _native_basic_lock_sp_offset(in_ByteSize(-1))
+{
+  {
+    debug_only(No_Safepoint_Verifier nsv;)
+    assert_locked_or_safepoint(CodeCache_lock);
+
+    init_defaults();
+    _method                  = method;
+    _entry_bci               = InvocationEntryBci;
+    // We have no exception handler or deopt handler, so make the
+    // values something that will never match a pc, like the nmethod vtable entry.
+    _exception_offset        = 0;
+    _deoptimize_offset       = 0;
+    _deoptimize_mh_offset    = 0;
+    _unwind_handler_offset   = -1;
+    _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
+    _orig_pc_offset          = 0;
+    _consts_offset           = data_offset();
+    _stub_offset             = data_offset();
+    _oops_offset             = data_offset();
+    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
+    _scopes_pcs_offset       = _scopes_data_offset;
+    _dependencies_offset     = _scopes_pcs_offset;
+    _handler_table_offset    = _dependencies_offset;
+    _nul_chk_table_offset    = _handler_table_offset;
+    _nmethod_end_offset      = _nul_chk_table_offset;
+    _compile_id              = 0;  // default
+    _comp_level              = CompLevel_none;
+    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
+    _osr_entry_point         = NULL;
+    _exception_cache         = NULL;
+    _pc_desc_cache.reset_to(NULL);
+
+    code_buffer->copy_values_to(this);
+    debug_only(verify_scavenge_root_oops());
+    CodeCache::commit(this);
+  }
+
+  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
+    ttyLocker ttyl;  // keep the following output all in one block
+    // This output goes directly to the tty, not the compiler log.
+    // To enable tools to match it up with the compilation activity,
+    // be sure to tag this tty output with the compile ID.
+    if (xtty != NULL) {
+      xtty->begin_head("print_dtrace_nmethod");
+      xtty->method(_method);
+      xtty->stamp();
+      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+    }
+    // print the header part first
+    print();
+    // then print the requested information
+    if (PrintNMethods) {
+      print_code();
+    }
+    if (PrintRelocations) {
+      print_relocations();
+    }
+    if (xtty != NULL) {
+      xtty->tail("print_dtrace_nmethod");
+    }
+  }
+}
+#endif // def HAVE_DTRACE_H
+
+void* nmethod::operator new(size_t size, int nmethod_size) {
+  // Always leave some room in the CodeCache for I2C/C2I adapters
+  if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) return NULL;
+  return CodeCache::allocate(nmethod_size);
+}
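+
+// Note: unlike a throwing operator new, this one can return NULL when the
+// code cache is close to full, which is why every allocation site above
+// checks the resulting nmethod* for NULL instead of assuming success.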
+
+
+nmethod::nmethod(
+  Method* method,
+  int nmethod_size,
+  int compile_id,
+  int entry_bci,
+  CodeOffsets* offsets,
+  int orig_pc_offset,
+  DebugInformationRecorder* debug_info,
+  Dependencies* dependencies,
+  CodeBuffer *code_buffer,
+  int frame_size,
+  OopMapSet* oop_maps,
+  ExceptionHandlerTable* handler_table,
+  ImplicitExceptionTable* nul_chk_table,
+  AbstractCompiler* compiler,
+  int comp_level
+#ifdef GRAAL
+  , Handle installed_code
+#endif
+  )
+  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
+             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
+  _native_receiver_sp_offset(in_ByteSize(-1)),
+  _native_basic_lock_sp_offset(in_ByteSize(-1))
+{
+  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
+  {
+    debug_only(No_Safepoint_Verifier nsv;)
+    assert_locked_or_safepoint(CodeCache_lock);
+
+    init_defaults();
+    _method                  = method;
+    _entry_bci               = entry_bci;
+    _compile_id              = compile_id;
+    _comp_level              = comp_level;
+    _compiler                = compiler;
+    _orig_pc_offset          = orig_pc_offset;
+
+    // Section offsets
+    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
+    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
+
+#ifdef GRAAL
+    _graal_installed_code = installed_code();
+
+    // Graal produces no (!) stub section
+    if (offsets->value(CodeOffsets::Exceptions) != -1) {
+      _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
+    } else {
+      _exception_offset = -1;
+    }
+    if (offsets->value(CodeOffsets::Deopt) != -1) {
+      _deoptimize_offset       = code_offset()          + offsets->value(CodeOffsets::Deopt);
+    } else {
+      _deoptimize_offset = -1;
+    }
+    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
+      _deoptimize_mh_offset  = code_offset()          + offsets->value(CodeOffsets::DeoptMH);
+    } else {
+      _deoptimize_mh_offset  = -1;
+    }
+#else
+    // Exception handler and deopt handler are in the stub section
+    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
+    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
+
+    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
+    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
+    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
+      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
+    } else {
+      _deoptimize_mh_offset  = -1;
+    }
+#endif
+    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
+      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
+    } else {
+      _unwind_handler_offset = -1;
+    }
+
+    _oops_offset             = data_offset();
+    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);
+
+    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
+    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
+    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
+    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
+    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
+
+    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
+    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
+    _exception_cache         = NULL;
+    _pc_desc_cache.reset_to(scopes_pcs_begin());
+
+    // Copy contents of ScopeDescRecorder to nmethod
+    code_buffer->copy_values_to(this);
+    debug_info->copy_to(this);
+    dependencies->copy_to(this);
+    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
+      CodeCache::add_scavenge_root_nmethod(this);
+    }
+    debug_only(verify_scavenge_root_oops());
+
+    CodeCache::commit(this);
+
+    // Copy contents of ExceptionHandlerTable to nmethod
+    handler_table->copy_to(this);
+    nul_chk_table->copy_to(this);
+
+    // We use the entry point information to find out whether a method is
+    // static or non-static.
+    assert(compiler->is_c2() ||
+           _method->is_static() == (entry_point() == _verified_entry_point),
+           "entry points must be the same for static methods and differ otherwise");
+  }
+
+  bool printnmethods = PrintNMethods
+    || CompilerOracle::should_print(_method)
+    || CompilerOracle::has_option_string(_method, "PrintNMethods");
+  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
+    print_nmethod(printnmethods);
+  }
+}
+
+
+// Print a short set of xml attributes to identify this nmethod.  The
+// output should be embedded in some other element.
+void nmethod::log_identity(xmlStream* log) const {
+  log->print(" compile_id='%d'", compile_id());
+  const char* nm_kind = compile_kind();
+  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
+  if (compiler() != NULL) {
+    log->print(" compiler='%s'", compiler()->name());
+  }
+  if (TieredCompilation) {
+    log->print(" level='%d'", comp_level());
+  }
+}
+
+
+#define LOG_OFFSET(log, name)                    \
+  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
+    log->print(" " XSTR(name) "_offset='%d'"    , \
+               (intptr_t)name##_begin() - (intptr_t)this)
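+
+// For example, LOG_OFFSET(xtty, stub) expands to a check that the stub
+// section is non-empty and, if so, prints an attribute of the form
+// stub_offset='N', where N is the section's byte offset from the start of
+// this nmethod.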
+
+
+void nmethod::log_new_nmethod() const {
+  if (LogCompilation && xtty != NULL) {
+    ttyLocker ttyl;
+    HandleMark hm;
+    xtty->begin_elem("nmethod");
+    log_identity(xtty);
+    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
+    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+
+    LOG_OFFSET(xtty, relocation);
+    LOG_OFFSET(xtty, consts);
+    LOG_OFFSET(xtty, insts);
+    LOG_OFFSET(xtty, stub);
+    LOG_OFFSET(xtty, scopes_data);
+    LOG_OFFSET(xtty, scopes_pcs);
+    LOG_OFFSET(xtty, dependencies);
+    LOG_OFFSET(xtty, handler_table);
+    LOG_OFFSET(xtty, nul_chk_table);
+    LOG_OFFSET(xtty, oops);
+
+    xtty->method(method());
+    xtty->stamp();
+    xtty->end_elem();
+  }
+}
+
+#undef LOG_OFFSET
+
+
+// Print out more verbose output, usually for a newly created nmethod.
+void nmethod::print_on(outputStream* st, const char* msg) const {
+  if (st != NULL) {
+    ttyLocker ttyl;
+    if (WizardMode) {
+      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
+      st->print_cr(" (" INTPTR_FORMAT ")", this);
+    } else {
+      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
+    }
+  }
+}
+
+
+void nmethod::print_nmethod(bool printmethod) {
+  ttyLocker ttyl;  // keep the following output all in one block
+  if (xtty != NULL) {
+    xtty->begin_head("print_nmethod");
+    xtty->stamp();
+    xtty->end_head();
+  }
+  // print the header part first
+  print();
+  // then print the requested information
+  if (printmethod) {
+    print_code();
+    print_pcs();
+    if (oop_maps()) {
+      oop_maps()->print();
+    }
+  }
+  if (PrintDebugInfo) {
+    print_scopes();
+  }
+  if (PrintRelocations) {
+    print_relocations();
+  }
+  if (PrintDependencies) {
+    print_dependencies();
+  }
+  if (PrintExceptionHandlers) {
+    print_handler_table();
+    print_nul_chk_table();
+  }
+  if (xtty != NULL) {
+    xtty->tail("print_nmethod");
+  }
+}
+
+
+// Promote one word from an assembly-time handle to a live embedded oop.
+inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
+  if (handle == NULL ||
+      // As a special case, IC oops are initialized to 1 or -1.
+      handle == (jobject) Universe::non_oop_word()) {
+    (*dest) = (oop) handle;
+  } else {
+    (*dest) = JNIHandles::resolve_non_null(handle);
+  }
+}
+
+
+// Have to have the same name because it's called by a template
+void nmethod::copy_values(GrowableArray<jobject>* array) {
+  int length = array->length();
+  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
+  oop* dest = oops_begin();
+  for (int index = 0 ; index < length; index++) {
+    initialize_immediate_oop(&dest[index], array->at(index));
+  }
+
+  // Now we can fix up all the oops in the code.  We need to do this
+  // in the code because the assembler uses jobjects as placeholders.
+  // The code and relocations have already been initialized by the
+  // CodeBlob constructor, so it is valid even at this early point to
+  // iterate over relocations and patch the code.
+  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
+}
+
+void nmethod::copy_values(GrowableArray<Metadata*>* array) {
+  int length = array->length();
+  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
+  Metadata** dest = metadata_begin();
+  for (int index = 0 ; index < length; index++) {
+    dest[index] = array->at(index);
+  }
+}
+
+bool nmethod::is_at_poll_return(address pc) {
+  RelocIterator iter(this, pc, pc+1);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::poll_return_type)
+      return true;
+  }
+  return false;
+}
+
+
+bool nmethod::is_at_poll_or_poll_return(address pc) {
+  RelocIterator iter(this, pc, pc+1);
+  while (iter.next()) {
+    relocInfo::relocType t = iter.type();
+    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
+      return true;
+  }
+  return false;
+}
+
+
+void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
+  // re-patch all oop-bearing instructions, just in case some oops moved
+  RelocIterator iter(this, begin, end);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::oop_type) {
+      oop_Relocation* reloc = iter.oop_reloc();
+      if (initialize_immediates && reloc->oop_is_immediate()) {
+        oop* dest = reloc->oop_addr();
+        initialize_immediate_oop(dest, (jobject) *dest);
+      }
+      // Refresh the oop-related bits of this instruction.
+      reloc->fix_oop_relocation();
+    } else if (iter.type() == relocInfo::metadata_type) {
+      metadata_Relocation* reloc = iter.metadata_reloc();
+      reloc->fix_metadata_relocation();
+    }
+
+    // There must not be any interfering patches or breakpoints.
+    assert(!(iter.type() == relocInfo::breakpoint_type
+             && iter.breakpoint_reloc()->active()),
+           "no active breakpoint");
+  }
+}
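+
+// Sketch of a typical use (bounds and flag as assumed here): calling
+// fix_oop_relocations(NULL, NULL, false) after a GC walks the whole nmethod
+// (NULL bounds make the RelocIterator cover everything) and refreshes every
+// embedded oop; immediates are only (re)initialized from jobject handles on
+// the initialize_immediates == true path used by copy_values() above.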
+
+
+void nmethod::verify_oop_relocations() {
+  // Ensure that the code matches the current oop values
+  RelocIterator iter(this, NULL, NULL);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::oop_type) {
+      oop_Relocation* reloc = iter.oop_reloc();
+      if (!reloc->oop_is_immediate()) {
+        reloc->verify_oop_relocation();
+      }
+    }
+  }
+}
+
+
+ScopeDesc* nmethod::scope_desc_at(address pc) {
+  PcDesc* pd = pc_desc_at(pc);
+  guarantee(pd != NULL, "scope must be present");
+  return new ScopeDesc(this, pd->scope_decode_offset(),
+                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
+                       pd->return_oop());
+}
+
+
+void nmethod::clear_inline_caches() {
+  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
+  if (is_zombie()) {
+    return;
+  }
+
+  RelocIterator iter(this);
+  while (iter.next()) {
+    iter.reloc()->clear_inline_cache();
+  }
+}
+
+
+void nmethod::cleanup_inline_caches() {
+
+  assert_locked_or_safepoint(CompiledIC_lock);
+
+  // If the method is not entrant or zombie then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (!is_in_use()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // This means that the low_boundary is going to be a little too high.
+    // This shouldn't matter, since oops of non-entrant methods are never used.
+    // In fact, why are we bothering to look at oops in a non-entrant method??
+  }
+
+  // Find all calls in an nmethod, and clear the ones that point to zombie methods
+  ResourceMark rm;
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+    switch(iter.type()) {
+      case relocInfo::virtual_call_type:
+      case relocInfo::opt_virtual_call_type: {
+        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        // OK to look up references to zombies here
+        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
+        if( cb != NULL && cb->is_nmethod() ) {
+          nmethod* nm = (nmethod*)cb;
+          // Clean inline caches pointing to both zombie and not_entrant methods
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
+        }
+        break;
+      }
+      case relocInfo::static_call_type: {
+        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
+        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
+        if( cb != NULL && cb->is_nmethod() ) {
+          nmethod* nm = (nmethod*)cb;
+          // Clean inline caches pointing to both zombie and not_entrant methods
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
+        }
+        break;
+      }
+    }
+  }
+}
+
+// This is a private interface with the sweeper.
+void nmethod::mark_as_seen_on_stack() {
+  assert(is_not_entrant(), "must be a non-entrant method");
+  // Set the traversal mark to ensure that the sweeper does 2
+  // cleaning passes before moving to zombie.
+  set_stack_traversal_mark(NMethodSweeper::traversal_count());
+}
+
+// Tell if a non-entrant method can be converted to a zombie (i.e.,
+// there are no activations on the stack, not in use by the VM,
+// and not in use by the ServiceThread)
+bool nmethod::can_not_entrant_be_converted() {
+  assert(is_not_entrant(), "must be a non-entrant method");
+
+  // Since the nmethod sweeper only does partial sweep the sweeper's traversal
+  // count can be greater than the stack traversal count before it hits the
+  // nmethod for the second time.
+  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
+         !is_locked_by_vm();
+}
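+
+// Sketch of the arithmetic: if the sweeper stamped this nmethod with
+// stack_traversal_mark == 5, the condition 5 + 1 < traversal_count only
+// holds once traversal_count reaches 7, i.e. after at least two further
+// sweeps have had a chance to observe the stacks.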
+
+void nmethod::inc_decompile_count() {
+#ifndef GRAAL
+  if (!is_compiled_by_c2()) return;
+#endif
+  // Could be gated by ProfileTraps, but do not bother...
+  Method* m = method();
+  if (m == NULL)  return;
+  MethodData* mdo = m->method_data();
+  if (mdo == NULL)  return;
+  // There is a benign race here.  See comments in methodData.hpp.
+  mdo->inc_decompile_count();
+}
+
+void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
+
+  post_compiled_method_unload();
+
+  // Since this nmethod is being unloaded, make sure that dependencies
+  // recorded in instanceKlasses get flushed and pass non-NULL closure to
+  // indicate that this work is being done during a GC.
+  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
+  assert(is_alive != NULL, "Should be non-NULL");
+  // A non-NULL is_alive closure indicates that this is being called during GC.
+  flush_dependencies(is_alive);
+
+  // Break cycle between nmethod & method
+  if (TraceClassUnloading && WizardMode) {
+    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
+                  " unloadable], Method*(" INTPTR_FORMAT
+                  "), cause(" INTPTR_FORMAT ")",
+                  this, (address)_method, (address)cause);
+    if (!Universe::heap()->is_gc_active())
+      cause->klass()->print();
+  }
+  // Unlink the osr method, so we do not look this up again
+  if (is_osr_method()) {
+    invalidate_osr_method();
+  }
+  // If _method is already NULL the Method* is about to be unloaded,
+  // so we don't have to break the cycle. Note that it is possible to
+  // have the Method* live here, in case we unload the nmethod because
+  // it is pointing to some oop (other than the Method*) being unloaded.
+  if (_method != NULL) {
+    // OSR methods point to the Method*, but the Method* does not
+    // point back!
+    if (_method->code() == this) {
+      _method->clear_code(); // Break a cycle
+    }
+    _method = NULL;            // Clear the method of this dead nmethod
+  }
+
+#ifdef GRAAL
+  if (_graal_installed_code != NULL) {
+    HotSpotInstalledCode::set_nmethod(_graal_installed_code, 0);
+    _graal_installed_code = NULL;
+  }
+#endif
+
+  // Mark this nmethod as unloaded - i.e., change state and notify sweeper
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  if (is_in_use()) {
+    // Transitioning directly from live to unloaded -- so
+    // we need to force a cache clean-up; remember this
+    // for later on.
+    CodeCache::set_needs_cache_clean(true);
+  }
+  _state = unloaded;
+
+  // Log the unloading.
+  log_state_change();
+
+  // The Method* is gone at this point
+  assert(_method == NULL, "Tautology");
+
+  set_osr_link(NULL);
+  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
+  NMethodSweeper::notify(this);
+}
+
+void nmethod::invalidate_osr_method() {
+  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
+  // Remove from list of active nmethods
+  if (method() != NULL)
+    method()->method_holder()->remove_osr_nmethod(this);
+  // Set entry as invalid
+  _entry_bci = InvalidOSREntryBci;
+}
+
+void nmethod::log_state_change() const {
+  if (LogCompilation) {
+    if (xtty != NULL) {
+      ttyLocker ttyl;  // keep the following output all in one block
+      if (_state == unloaded) {
+        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
+                         os::current_thread_id());
+      } else {
+        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
+                         os::current_thread_id(),
+                         (_state == zombie ? " zombie='1'" : ""));
+      }
+      log_identity(xtty);
+      xtty->stamp();
+      xtty->end_elem();
+    }
+  }
+  if (PrintCompilation && _state != unloaded) {
+    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
+  }
+}
+
+// Common functionality for both make_not_entrant and make_zombie
+bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
+  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
+  assert(!is_zombie(), "should not already be a zombie");
+
+  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
+  nmethodLocker nml(this);
+  methodHandle the_method(method());
+  No_Safepoint_Verifier nsv;
+
+  {
+    // invalidate osr nmethod before acquiring the patching lock since
+    // they both acquire leaf locks and we don't want a deadlock.
+    // This logic is equivalent to the logic below for patching the
+    // verified entry point of regular methods.
+    if (is_osr_method()) {
+      // this effectively makes the osr nmethod not entrant
+      invalidate_osr_method();
+    }
+
+    // Enter critical section.  Does not block for safepoint.
+    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+
+    if (_state == state) {
+      // another thread already performed this transition so nothing
+      // to do, but return false to indicate this.
+      return false;
+    }
+
+#ifdef GRAAL
+    if (_graal_installed_code != NULL) {
+      HotSpotInstalledCode::set_nmethod(_graal_installed_code, 0);
+      _graal_installed_code = NULL;
+    }
+#endif
+
+    // The caller can be calling the method statically or through an inline
+    // cache call.
+    if (!is_osr_method() && !is_not_entrant()) {
+      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
+                  SharedRuntime::get_handle_wrong_method_stub());
+    }
+
+    if (is_in_use()) {
+      // It's a true state change, so mark the method as decompiled.
+      // Do it only for transition from alive.
+      inc_decompile_count();
+    }
+
+    // Change state
+    _state = state;
+
+    // Log the transition once
+    log_state_change();
+
+    // Remove nmethod from method.
+    // We need to check if both the _code and _from_compiled_code_entry_point
+    // refer to this nmethod because there is a race in setting these two fields
+    // in Method* as seen in bugid 4947125.
+    // If the vep() points to the zombie nmethod, the memory for the nmethod
+    // could be flushed and the compiler and vtable stubs could still call
+    // through it.
+    if (method() != NULL && (method()->code() == this ||
+                             method()->from_compiled_entry() == verified_entry_point())) {
+      HandleMark hm;
+      method()->clear_code();
+    }
+
+    if (state == not_entrant) {
+      mark_as_seen_on_stack();
+    }
+
+  } // leave critical region under Patching_lock
+
+  // When the nmethod becomes zombie it is no longer alive so the
+  // dependencies must be flushed.  nmethods in the not_entrant
+  // state will be flushed later when the transition to zombie
+  // happens or they get unloaded.
+  if (state == zombie) {
+    {
+      // Flushing dependencies must be done before any possible
+      // safepoint can sneak in, otherwise the oops used by the
+      // dependency logic could have become stale.
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      flush_dependencies(NULL);
+    }
+
+    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
+    // event and it hasn't already been reported for this nmethod then
+    // report it now. The event may have been reported earilier if the GC
+    // marked it for unloading). JvmtiDeferredEventQueue support means
+    // we no longer go to a safepoint here.
+    post_compiled_method_unload();
+
+#ifdef ASSERT
+    // It's no longer safe to access the oops section since zombie
+    // nmethods aren't scanned for GC.
+    _oops_are_stale = true;
+#endif
+  } else {
+    assert(state == not_entrant, "other cases may need to be handled differently");
+  }
+
+  if (TraceCreateZombies) {
+    tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
+  }
+
+  // Make sweeper aware that there is a zombie method that needs to be removed
+  NMethodSweeper::notify(this);
+
+  return true;
+}
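+
+// Callers are expected to reach this through thin wrappers such as
+// make_not_entrant() and make_zombie() (declared in the header), which pass
+// the target state; the boolean result tells the caller whether this thread
+// performed the transition or another thread got there first.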
+
+void nmethod::flush() {
+  // Note that there are no valid oops in the nmethod anymore.
+  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
+  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
+
+  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  // completely deallocate this method
+  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
+  if (PrintMethodFlushing) {
+    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
+        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
+  }
+
+  // We need to deallocate any ExceptionCache data.
+  // Note that we do not need to grab the nmethod lock for this; it
+  // had better be thread safe if we're disposing of it!
+  ExceptionCache* ec = exception_cache();
+  set_exception_cache(NULL);
+  while(ec != NULL) {
+    ExceptionCache* next = ec->next();
+    delete ec;
+    ec = next;
+  }
+
+  if (on_scavenge_root_list()) {
+    CodeCache::drop_scavenge_root_nmethod(this);
+  }
+
+  if (is_speculatively_disconnected()) {
+    CodeCache::remove_saved_code(this);
+  }
+
+#ifdef SHARK
+  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
+#endif // SHARK
+
+  ((CodeBlob*)(this))->flush();
+
+  CodeCache::free(this);
+}
+
+
+//
+// Notify all classes this nmethod is dependent on that it is no
+// longer dependent. This should only be called in two situations.
+// First, when an nmethod transitions to a zombie all dependents need
+// to be cleared.  Since zombification happens at a safepoint there are no
+// synchronization issues.  The second place is a little more tricky.
+// During phase 1 of mark sweep class unloading may happen and as a
+// result some nmethods may get unloaded.  In this case the flushing
+// of dependencies must happen during phase 1 since after GC any
+// dependencies in the unloaded nmethod won't be updated, so
+// traversing the dependency information is unsafe.  In that case this
+// function is called with a non-NULL argument and this function only
+// notifies instanceKlasses that are reachable.
+
+void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
+  "is_alive is non-NULL if and only if we are called during GC");
+  if (!has_flushed_dependencies()) {
+    set_has_flushed_dependencies();
+    for (Dependencies::DepStream deps(this); deps.next(); ) {
+      Klass* klass = deps.context_type();
+      if (klass == NULL)  continue;  // ignore things like evol_method
+
+      // During GC the is_alive closure is non-NULL, and is used to
+      // determine liveness of dependees that need to be updated.
+      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
+        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
+      }
+    }
+  }
+}
+
+
+// If this oop is not live, the nmethod can be unloaded.
+bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
+  assert(root != NULL, "just checking");
+  oop obj = *root;
+  if (obj == NULL || is_alive->do_object_b(obj)) {
+      return false;
+  }
+
+  // If ScavengeRootsInCode is true, an nmethod might be unloaded
+  // simply because one of its constant oops has gone dead.
+  // No actual classes need to be unloaded in order for this to occur.
+  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
+  make_unloaded(is_alive, obj);
+  return true;
+}
+
+// ------------------------------------------------------------------
+// post_compiled_method_load_event
+// new method for install_code() path
+// Transfer information from compilation to jvmti
+void nmethod::post_compiled_method_load_event() {
+
+  Method* moop = method();
+#ifndef USDT2
+  HS_DTRACE_PROBE8(hotspot, compiled__method__load,
+      moop->klass_name()->bytes(),
+      moop->klass_name()->utf8_length(),
+      moop->name()->bytes(),
+      moop->name()->utf8_length(),
+      moop->signature()->bytes(),
+      moop->signature()->utf8_length(),
+      insts_begin(), insts_size());
+#else /* USDT2 */
+  HOTSPOT_COMPILED_METHOD_LOAD(
+      (char *) moop->klass_name()->bytes(),
+      moop->klass_name()->utf8_length(),
+      (char *) moop->name()->bytes(),
+      moop->name()->utf8_length(),
+      (char *) moop->signature()->bytes(),
+      moop->signature()->utf8_length(),
+      insts_begin(), insts_size());
+#endif /* USDT2 */
+
+  if (JvmtiExport::should_post_compiled_method_load() ||
+      JvmtiExport::should_post_compiled_method_unload()) {
+    get_and_cache_jmethod_id();
+  }
+
+  if (JvmtiExport::should_post_compiled_method_load()) {
+    // Let the Service thread (which is a real Java thread) post the event
+    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+    JvmtiDeferredEventQueue::enqueue(
+      JvmtiDeferredEvent::compiled_method_load_event(this));
+  }
+}
+
+jmethodID nmethod::get_and_cache_jmethod_id() {
+  if (_jmethod_id == NULL) {
+    // Cache the jmethod_id since it can no longer be looked up once the
+    // method itself has been marked for unloading.
+    _jmethod_id = method()->jmethod_id();
+  }
+  return _jmethod_id;
+}
+
+void nmethod::post_compiled_method_unload() {
+  if (unload_reported()) {
+    // During unloading we transition to unloaded and then to zombie
+    // and the unloading is reported during the first transition.
+    return;
+  }
+
+  assert(_method != NULL && !is_unloaded(), "just checking");
+  DTRACE_METHOD_UNLOAD_PROBE(method());
+
+  // If a JVMTI agent has enabled the CompiledMethodUnload event then
+  // post the event. Sometime later this nmethod will be made a zombie
+  // by the sweeper but the Method* will not be valid at that point.
+  // If the _jmethod_id is null then no load event was ever requested
+  // so don't bother posting the unload.  The main reason for this is
+  // that the jmethodID is a weak reference to the Method* so if
+  // it's being unloaded there's no way to look it up since the weak
+  // ref will have been cleared.
+  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
+    assert(!unload_reported(), "already unloaded");
+    JvmtiDeferredEvent event =
+      JvmtiDeferredEvent::compiled_method_unload_event(this,
+          _jmethod_id, insts_begin());
+    if (SafepointSynchronize::is_at_safepoint()) {
+      // Don't want to take the queueing lock. Add it as pending and
+      // it will get enqueued later.
+      JvmtiDeferredEventQueue::add_pending_event(event);
+    } else {
+      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+      JvmtiDeferredEventQueue::enqueue(event);
+    }
+  }
+
+  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
+  // any time. As the nmethod is being unloaded now we mark it as
+  // having the unload event reported - this will ensure that we don't
+  // attempt to report the event in the unlikely scenario where the
+  // event is enabled at the time the nmethod is made a zombie.
+  set_unload_reported();
+}
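+
+// The enqueue-or-defer step above is a common safepoint-aware pattern:
+// when already at a safepoint the queue's mutex must not be taken, so
+// the event is parked on a pending list and enqueued later.  A rough
+// sketch of the shape, with hypothetical names (not the JVMTI API):
+//
+//   void post(Event e) {
+//     if (at_safepoint()) {
+//       pending_list.add(e);        // no lock; drained after the safepoint
+//     } else {
+//       MutexLocker ml(queue_lock); // safe to take the lock here
+//       queue.enqueue(e);
+//     }
+//   }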
+
+// This is called at the end of the strong tracing/marking phase of a
+// GC to unload an nmethod if it contains otherwise unreachable
+// oops.
+
+void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
+  // Make sure the oops are ready to receive visitors
+  assert(!is_zombie() && !is_unloaded(),
+         "should not call follow on zombie or unloaded nmethod");
+
+  // If the method is not entrant then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+  // The RedefineClasses() API can cause the class unloading invariant
+  // to no longer be true. See jvmtiExport.hpp for details.
+  // Also, leave a debugging breadcrumb in local flag.
+  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
+  if (a_class_was_redefined) {
+    // The unloading_occurred flag is set before the call to
+    // post_compiled_method_unload() so that the unloading of this
+    // nmethod is reported.
+    unloading_occurred = true;
+  }
+
+#ifdef GRAAL
+  // Follow Graal method
+  if (_graal_installed_code != NULL && can_unload(is_alive, (oop*)&_graal_installed_code, unloading_occurred)) {
+    return;
+  }
+#endif
+
+  // Exception cache
+  ExceptionCache* ec = exception_cache();
+  while (ec != NULL) {
+    Klass* ex_klass = ec->exception_type();
+    ExceptionCache* next_ec = ec->next();
+    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
+      remove_from_exception_cache(ec);
+    }
+    ec = next_ec;
+  }
+
+  // If class unloading occurred we first iterate over all inline caches and
+  // clear ICs where the cached oop is referring to an unloaded klass or method.
+  // The remaining live cached oops will be traversed in the relocInfo::oop_type
+  // iteration below.
+  if (unloading_occurred) {
+    RelocIterator iter(this, low_boundary);
+    while(iter.next()) {
+      if (iter.type() == relocInfo::virtual_call_type) {
+        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        if (ic->is_icholder_call()) {
+          // The only exception is compiledICHolder oops which may
+          // yet be marked below. (We check this further below).
+          CompiledICHolder* cichk_oop = ic->cached_icholder();
+          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
+              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
+            continue;
+          }
+        } else {
+          Metadata* ic_oop = ic->cached_metadata();
+          if (ic_oop != NULL) {
+            if (ic_oop->is_klass()) {
+              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
+                continue;
+              }
+            } else if (ic_oop->is_method()) {
+              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
+                continue;
+              }
+            } else {
+              ShouldNotReachHere();
+            }
+          }
+        }
+        ic->set_to_clean();
+      }
+    }
+  }
+
+  // Compiled code
+  {
+    RelocIterator iter(this, low_boundary);
+    while (iter.next()) {
+      if (iter.type() == relocInfo::oop_type) {
+        oop_Relocation* r = iter.oop_reloc();
+        // In this loop, we must only traverse those oops directly embedded in
+        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
+        assert(1 == (r->oop_is_immediate()) +
+                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+               "oop must be found in exactly one place");
+        if (r->oop_is_immediate() && r->oop_value() != NULL) {
+          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+            return;
+          }
+        }
+      }
+    }
+  }
+
+  // Scopes
+  for (oop* p = oops_begin(); p < oops_end(); p++) {
+    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
+    if (can_unload(is_alive, p, unloading_occurred)) {
+      return;
+    }
+  }
+
+  // Ensure that all metadata is still alive
+  verify_metadata_loaders(low_boundary, is_alive);
+}
+
+#ifdef ASSERT
+
+class CheckClass : AllStatic {
+  static BoolObjectClosure* _is_alive;
+
+  // Check class_loader is alive for this bit of metadata.
+  static void check_class(Metadata* md) {
+    Klass* klass = NULL;
+    if (md->is_klass()) {
+      klass = ((Klass*)md);
+    } else if (md->is_method()) {
+      klass = ((Method*)md)->method_holder();
+    } else if (md->is_methodData()) {
+      klass = ((MethodData*)md)->method()->method_holder();
+    } else {
+      md->print();
+      ShouldNotReachHere();
+    }
+    assert(klass->is_loader_alive(_is_alive), "must be alive");
+  }
+ public:
+  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
+    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
+    _is_alive = is_alive;
+    nm->metadata_do(check_class);
+  }
+};
+
+// This is called during a safepoint so can use static data
+BoolObjectClosure* CheckClass::_is_alive = NULL;
+#endif // ASSERT
+
+
+// Processing of oop references should have been sufficient to keep
+// all strong references alive.  Any weak references should have been
+// cleared as well.  Visit all the metadata and ensure that it's
+// really alive.
+void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
+#ifdef ASSERT
+  RelocIterator iter(this, low_boundary);
+  while (iter.next()) {
+    // static_stub_Relocations may have dangling references to
+    // Method*s so trim them out here.  Otherwise it looks like
+    // compiled code is maintaining a link to dead metadata.
+    address static_call_addr = NULL;
+    if (iter.type() == relocInfo::opt_virtual_call_type) {
+      CompiledIC* cic = CompiledIC_at(iter.reloc());
+      if (!cic->is_call_to_interpreted()) {
+        static_call_addr = iter.addr();
+        cic->set_to_clean();
+      }
+    } else if (iter.type() == relocInfo::static_call_type) {
+      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
+      if (!csc->is_call_to_interpreted()) {
+        static_call_addr = iter.addr();
+        csc->set_to_clean();
+      }
+    }
+    if (static_call_addr != NULL) {
+      RelocIterator sciter(this, low_boundary);
+      while (sciter.next()) {
+        if (sciter.type() == relocInfo::static_stub_type &&
+            sciter.static_stub_reloc()->static_call() == static_call_addr) {
+          sciter.static_stub_reloc()->clear_inline_cache();
+        }
+      }
+    }
+  }
+  // Check that the metadata embedded in the nmethod is alive
+  CheckClass::do_check_class(is_alive, this);
+#endif
+}
+
+
+// Iterate over metadata calling this function.   Used by RedefineClasses
+void nmethod::metadata_do(void f(Metadata*)) {
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+  {
+    // Visit all immediate references that are embedded in the instruction stream.
+    RelocIterator iter(this, low_boundary);
+    while (iter.next()) {
+      if (iter.type() == relocInfo::metadata_type ) {
+        metadata_Relocation* r = iter.metadata_reloc();
+        // In this loop, we must only follow the metadata directly embedded in
+        // the code.  Other metadata (oop_index>0) are seen as part of
+        // the metadata section below.
+        assert(1 == (r->metadata_is_immediate()) +
+               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
+               "metadata must be found in exactly one place");
+        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
+          Metadata* md = r->metadata_value();
+          f(md);
+        }
+      }
+    }
+  }
+
+  // Visit the metadata section
+  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
+    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-metadata entries
+    Metadata* md = *p;
+    f(md);
+  }
+  // Finally, visit the Method* itself; it is not embedded in the places above.
+  if (_method != NULL) f(_method);
+}
+
+
+// This method is called twice during GC -- once while
+// tracing the "active" nmethods on thread stacks during
+// the (strong) marking phase, and then again when walking
+// the code cache contents during the weak roots processing
+// phase. The two uses are distinguished by means of the
+// 'do_strong_roots_only' flag, which is true in the first
+// case. We want to walk the weak roots in the nmethod
+// only in the second case. The weak roots in the nmethod
+// are the oops in the ExceptionCache and the InlineCache
+// oops.
+void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
+  // make sure the oops are ready to receive visitors
+  assert(!is_zombie() && !is_unloaded(),
+         "should not call follow on zombie or unloaded nmethod");
+
+  // If the method is not entrant or zombie then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+#ifdef GRAAL
+  if (_graal_installed_code != NULL) {
+    f->do_oop((oop*) &_graal_installed_code);
+  }
+#endif
+
+  RelocIterator iter(this, low_boundary);
+
+  while (iter.next()) {
+    if (iter.type() == relocInfo::oop_type ) {
+      oop_Relocation* r = iter.oop_reloc();
+      // In this loop, we must only follow those oops directly embedded in
+      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
+      assert(1 == (r->oop_is_immediate()) +
+                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+             "oop must be found in exactly one place");
+      if (r->oop_is_immediate() && r->oop_value() != NULL) {
+        f->do_oop(r->oop_addr());
+      }
+    }
+  }
+
+  // Scopes
+  // This includes oop constants not inlined in the code stream.
+  for (oop* p = oops_begin(); p < oops_end(); p++) {
+    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
+    f->do_oop(p);
+  }
+}
+
+#define NMETHOD_SENTINEL ((nmethod*)badAddress)
+
+nmethod* volatile nmethod::_oops_do_mark_nmethods;
+
+// An nmethod is "marked" if its _mark_link is set non-null.
+// Even if it is the end of the linked list, it will have a non-null link value,
+// as long as it is on the list.
+// This code must be MP safe, because it is used from parallel GC passes.
+bool nmethod::test_set_oops_do_mark() {
+  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
+  nmethod* observed_mark_link = _oops_do_mark_link;
+  if (observed_mark_link == NULL) {
+    // Claim this nmethod for this thread to mark.
+    observed_mark_link = (nmethod*)
+      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
+    if (observed_mark_link == NULL) {
+
+      // Atomically append this nmethod (now claimed) to the head of the list:
+      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
+      for (;;) {
+        nmethod* required_mark_nmethods = observed_mark_nmethods;
+        _oops_do_mark_link = required_mark_nmethods;
+        observed_mark_nmethods = (nmethod*)
+          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
+        if (observed_mark_nmethods == required_mark_nmethods)
+          break;
+      }
+      // Mark was clear when we first saw this guy.
+      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
+      return false;
+    }
+  }
+  // On fall through, another racing thread marked this nmethod before we did.
+  return true;
+}
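+
+// The claiming above combines two lock-free idioms: a CAS test-and-set
+// on the per-node link to claim ownership, then a CAS loop to push the
+// claimed node onto a shared intrusive list.  An illustrative sketch of
+// the same idea in C++11 atomics (not the VM's Atomic API):
+//
+//   struct Node { std::atomic<Node*> link; };
+//   std::atomic<Node*> g_head;
+//   Node* const CLAIMED = reinterpret_cast<Node*>(-1);
+//
+//   bool test_set_mark(Node* n) {
+//     Node* expected = nullptr;
+//     if (!n->link.compare_exchange_strong(expected, CLAIMED))
+//       return true;                          // lost the race; already marked
+//     Node* head = g_head.load();
+//     do {
+//       n->link.store(head);                  // our next pointer
+//     } while (!g_head.compare_exchange_weak(head, n));
+//     return false;                           // we claimed and pushed it
+//   }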
+
+void nmethod::oops_do_marking_prologue() {
+  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
+  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
+  // We use cmpxchg_ptr instead of regular assignment here because the user
+  // may fork a bunch of threads, and we need them all to see the same state.
+  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
+  guarantee(observed == NULL, "no races in this sequential code");
+}
+
+void nmethod::oops_do_marking_epilogue() {
+  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
+  nmethod* cur = _oops_do_mark_nmethods;
+  while (cur != NMETHOD_SENTINEL) {
+    assert(cur != NULL, "not NULL-terminated");
+    nmethod* next = cur->_oops_do_mark_link;
+    cur->_oops_do_mark_link = NULL;
+    cur->fix_oop_relocations();
+    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
+    cur = next;
+  }
+  void* required = _oops_do_mark_nmethods;
+  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
+  guarantee(observed == required, "no races in this sequential code");
+  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
+}
+
+class DetectScavengeRoot: public OopClosure {
+  bool     _detected_scavenge_root;
+public:
+  DetectScavengeRoot() : _detected_scavenge_root(false)
+  { NOT_PRODUCT(_print_nm = NULL); }
+  bool detected_scavenge_root() { return _detected_scavenge_root; }
+  virtual void do_oop(oop* p) {
+    if ((*p) != NULL && (*p)->is_scavengable()) {
+      NOT_PRODUCT(maybe_print(p));
+      _detected_scavenge_root = true;
+    }
+  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+#ifndef PRODUCT
+  nmethod* _print_nm;
+  void maybe_print(oop* p) {
+    if (_print_nm == NULL)  return;
+    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
+    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
+                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
+                  (intptr_t)(*p), (intptr_t)p);
+    (*p)->print();
+  }
+#endif //PRODUCT
+};
+
+bool nmethod::detect_scavenge_root_oops() {
+  DetectScavengeRoot detect_scavenge_root;
+  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
+  oops_do(&detect_scavenge_root);
+  return detect_scavenge_root.detected_scavenge_root();
+}
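+
+// DetectScavengeRoot is an instance of the closure (visitor) pattern
+// used throughout this file: oops_do() walks every oop slot in the
+// nmethod and hands each one to do_oop().  A minimal sketch of the
+// shape, with hypothetical names:
+//
+//   struct CountNonNullOops : public OopClosure {
+//     int count;
+//     CountNonNullOops() : count(0) {}
+//     virtual void do_oop(oop* p)       { if (*p != NULL) count++; }
+//     virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+//   };
+//   // usage: CountNonNullOops c; nm->oops_do(&c); then read c.count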
+
+// Method that knows how to preserve outgoing arguments at call. This method must
+// be called with a frame corresponding to a Java invoke.
+void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
+#ifndef SHARK
+  if (!method()->is_native()) {
+    SimpleScopeDesc ssd(this, fr.pc());
+    Bytecode_invoke call(ssd.method(), ssd.bci());
+    // compiled invokedynamic call sites have an implicit receiver at
+    // resolution time, so make sure it gets GC'ed.
+    bool has_receiver = !call.is_invokestatic();
+    Symbol* signature = call.signature();
+    fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
+  }
+#endif // !SHARK
+}
+
+
+oop nmethod::embeddedOop_at(u_char* p) {
+  RelocIterator iter(this, p, p + 1);
+  while (iter.next())
+    if (iter.type() == relocInfo::oop_type) {
+      return iter.oop_reloc()->oop_value();
+    }
+  return NULL;
+}
+
+
+inline bool includes(void* p, void* from, void* to) {
+  return from <= p && p < to;
+}
+
+
+void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
+  assert(count >= 2, "must be sentinel values, at least");
+
+#ifdef ASSERT
+  // must be sorted and unique; we do a binary search in find_pc_desc()
+  int prev_offset = pcs[0].pc_offset();
+  assert(prev_offset == PcDesc::lower_offset_limit,
+         "must start with a sentinel");
+  for (int i = 1; i < count; i++) {
+    int this_offset = pcs[i].pc_offset();
+    assert(this_offset > prev_offset, "offsets must be sorted");
+    prev_offset = this_offset;
+  }
+  assert(prev_offset == PcDesc::upper_offset_limit,
+         "must end with a sentinel");
+#endif //ASSERT
+
+  // Search for MethodHandle invokes and tag the nmethod.
+  for (int i = 0; i < count; i++) {
+    if (pcs[i].is_method_handle_invoke()) {
+      set_has_method_handle_invokes(true);
+      break;
+    }
+  }
+  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
+
+  int size = count * sizeof(PcDesc);
+  assert(scopes_pcs_size() >= size, "oob");
+  memcpy(scopes_pcs_begin(), pcs, size);
+
+  // Adjust the final sentinel downward.
+  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
+  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
+  last_pc->set_pc_offset(content_size() + 1);
+  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
+    // Fill any rounding gaps with copies of the last record.
+    last_pc[1] = last_pc[0];
+  }
+  // The following assert could fail if sizeof(PcDesc) is not
+  // an integral multiple of oopSize (the rounding term).
+  // If it fails, change the logic to always allocate a multiple
+  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
+  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
+}
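+
+// Note the rounding trick above: any slack between the logical end of
+// the PcDesc array and its allocated end is filled with copies of the
+// final sentinel, so the binary search in find_pc_desc() never reads an
+// uninitialized record.  In sketch form (plain array, illustrative):
+//
+//   memcpy(dst, src, count * sizeof(PcDesc));     // the real records
+//   for (PcDesc* p = dst + count; p < dst_end; p++)
+//     *p = p[-1];                                 // clone the last record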
+
+void nmethod::copy_scopes_data(u_char* buffer, int size) {
+  assert(scopes_data_size() >= size, "oob");
+  memcpy(scopes_data_begin(), buffer, size);
+}
+
+
+#ifdef ASSERT
+static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
+  PcDesc* lower = nm->scopes_pcs_begin();
+  PcDesc* upper = nm->scopes_pcs_end();
+  lower += 1; // exclude initial sentinel
+  PcDesc* res = NULL;
+  for (PcDesc* p = lower; p < upper; p++) {
+    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
+    if (match_desc(p, pc_offset, approximate)) {
+      if (res == NULL)
+        res = p;
+      else
+        res = (PcDesc*) badAddress;
+    }
+  }
+  return res;
+}
+#endif
+
+
+// Finds a PcDesc with real-pc equal to "pc"
+PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
+  address base_address = code_begin();
+  if ((pc < base_address) ||
+      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
+    return NULL;  // PC is wildly out of range
+  }
+  int pc_offset = (int) (pc - base_address);
+
+  // Check whether the PcDesc cache contains the desired PcDesc.
+  // (This has an almost 100% hit rate.)
+  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
+  if (res != NULL) {
+    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
+    return res;
+  }
+
+  // Fallback algorithm: quasi-linear search for the PcDesc
+  // Find the last pc_offset less than the given offset.
+  // The successor must be the required match, if there is a match at all.
+  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
+  PcDesc* lower = scopes_pcs_begin();
+  PcDesc* upper = scopes_pcs_end();
+  upper -= 1; // exclude final sentinel
+  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
+
+#define assert_LU_OK \
+  /* invariant on lower..upper during the following search: */ \
+  assert(lower->pc_offset() <  pc_offset, "sanity"); \
+  assert(upper->pc_offset() >= pc_offset, "sanity")
+  assert_LU_OK;
+
+  // Use the last successful return as a split point.
+  PcDesc* mid = _pc_desc_cache.last_pc_desc();
+  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
+  if (mid->pc_offset() < pc_offset) {
+    lower = mid;
+  } else {
+    upper = mid;
+  }
+
+  // Take giant steps at first (4096, then 256, then 16, then 1)
+  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
+  const int RADIX = (1 << LOG2_RADIX);
+  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
+    while ((mid = lower + step) < upper) {
+      assert_LU_OK;
+      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
+      if (mid->pc_offset() < pc_offset) {
+        lower = mid;
+      } else {
+        upper = mid;
+        break;
+      }
+    }
+    assert_LU_OK;
+  }
+
+  // Sneak up on the value with a linear search of length ~16.
+  while (true) {
+    assert_LU_OK;
+    mid = lower + 1;
+    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
+    if (mid->pc_offset() < pc_offset) {
+      lower = mid;
+    } else {
+      upper = mid;
+      break;
+    }
+  }
+#undef assert_LU_OK
+
+  if (match_desc(upper, pc_offset, approximate)) {
+    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
+    _pc_desc_cache.add_pc_desc(upper);
+    return upper;
+  } else {
+    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
+    return NULL;
+  }
+}
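+
+// The search above is a radix-stepped binary search: giant steps of
+// 4096, 256 and 16 narrow [lower, upper), then a short linear scan
+// finishes the job.  On a plain sorted array the same idea looks
+// roughly like this (illustrative sketch, hypothetical names):
+//
+//   // returns the index of the last element < key; requires a[0] < key <= a[n-1]
+//   int find_last_below(const int* a, int n, int key) {
+//     int lo = 0, hi = n - 1;            // invariant: a[lo] < key <= a[hi]
+//     for (int step = 4096; step > 1; step >>= 4) {
+//       int mid;
+//       while ((mid = lo + step) < hi) {
+//         if (a[mid] < key) lo = mid; else { hi = mid; break; }
+//       }
+//     }
+//     while (lo + 1 < hi) {              // final linear sneak-up
+//       if (a[lo + 1] < key) lo++; else break;
+//     }
+//     return lo;
+//   }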
+
+
+bool nmethod::check_all_dependencies() {
+  bool found_check = false;
+  // wholesale check of all dependencies
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    if (deps.check_dependency() != NULL) {
+      found_check = true;
+      NOT_DEBUG(break);
+    }
+  }
+  return found_check;  // tell caller if we found anything
+}
+
+bool nmethod::check_dependency_on(DepChange& changes) {
+  // What has happened:
+  // 1) a new class dependee has been added
+  // 2) dependee and all its super classes have been marked
+  bool found_check = false;  // set true if we are upset
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    // Evaluate only relevant dependencies.
+    if (deps.spot_check_dependency_at(changes) != NULL) {
+      found_check = true;
+      NOT_DEBUG(break);
+    }
+  }
+  return found_check;
+}
+
+bool nmethod::is_evol_dependent_on(Klass* dependee) {
+  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
+  Array<Method*>* dependee_methods = dependee_ik->methods();
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    if (deps.type() == Dependencies::evol_method) {
+      Method* method = deps.method_argument(0);
+      for (int j = 0; j < dependee_methods->length(); j++) {
+        if (dependee_methods->at(j) == method) {
+          // RC_TRACE macro has an embedded ResourceMark
+          RC_TRACE(0x01000000,
+            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
+            _method->method_holder()->external_name(),
+            _method->name()->as_C_string(),
+            _method->signature()->as_C_string(), compile_id(),
+            method->method_holder()->external_name(),
+            method->name()->as_C_string(),
+            method->signature()->as_C_string()));
+          if (TraceDependencies || LogCompilation)
+            deps.log_dependency(dependee);
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
+// Called from mark_for_deoptimization, when dependee is invalidated.
+bool nmethod::is_dependent_on_method(Method* dependee) {
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    if (deps.type() != Dependencies::evol_method)
+      continue;
+    Method* method = deps.method_argument(0);
+    if (method == dependee) return true;
+  }
+  return false;
+}
+
+
+bool nmethod::is_patchable_at(address instr_addr) {
+  assert(insts_contains(instr_addr), "wrong nmethod used");
+  if (is_zombie()) {
+    // a zombie may never be patched
+    return false;
+  }
+  return true;
+}
+
+
+address nmethod::continuation_for_implicit_exception(address pc) {
+  // Exception happened outside inline-cache check code => we are inside
+  // an active nmethod => use cpc to determine a return address
+  int exception_offset = pc - code_begin();
+  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
+#ifdef ASSERT
+  if (cont_offset == 0) {
+    Thread* thread = ThreadLocalStorage::get_thread_slow();
+    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
+    HandleMark hm(thread);
+    ResourceMark rm(thread);
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    assert(cb != NULL && cb == this, "");
+    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
+    print();
+    method()->print_codes();
+    print_code();
+    print_pcs();
+  }
+#endif
+  if (cont_offset == 0) {
+    // Let the normal error handling report the exception
+    return NULL;
+  }
+  return code_begin() + cont_offset;
+}
+
+
+
+void nmethod_init() {
+  // make sure you didn't forget to adjust the filler fields
+  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
+}
+
+
+//-------------------------------------------------------------------------------------------
+
+
+// QQQ might we make this work from a frame??
+nmethodLocker::nmethodLocker(address pc) {
+  CodeBlob* cb = CodeCache::find_blob(pc);
+  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for an nmethod found");
+  _nm = (nmethod*)cb;
+  lock_nmethod(_nm);
+}
+
+// Only JvmtiDeferredEvent::compiled_method_unload_event()
+// should pass zombie_ok == true.
+void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
+  if (nm == NULL)  return;
+  Atomic::inc(&nm->_lock_count);
+  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
+}
+
+void nmethodLocker::unlock_nmethod(nmethod* nm) {
+  if (nm == NULL)  return;
+  Atomic::dec(&nm->_lock_count);
+  guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
+}
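+
+// nmethodLocker is a simple atomic pin count: while _lock_count > 0 the
+// sweeper must leave the nmethod alone, even once it becomes a zombie.
+// The general shape, sketched with C++11 atomics (hypothetical names):
+//
+//   struct Pinnable {
+//     std::atomic<int> pins;
+//     void pin()         { pins.fetch_add(1); }
+//     void unpin()       { int old = pins.fetch_sub(1); assert(old > 0); }
+//     bool reclaimable() { return pins.load() == 0; }
+//   };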
+
+
+// -----------------------------------------------------------------------------
+// nmethod::get_deopt_original_pc
+//
+// Return the original PC for the given PC if:
+// (a) the given PC belongs to an nmethod and
+// (b) it is a deopt PC
+address nmethod::get_deopt_original_pc(const frame* fr) {
+  if (fr->cb() == NULL)  return NULL;
+
+  nmethod* nm = fr->cb()->as_nmethod_or_null();
+  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
+    return nm->get_original_pc(fr);
+
+  return NULL;
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandle
+
+bool nmethod::is_method_handle_return(address return_pc) {
+  if (!has_method_handle_invokes())  return false;
+  PcDesc* pd = pc_desc_at(return_pc);
+  if (pd == NULL)
+    return false;
+  return pd->is_method_handle_invoke();
+}
+
+
+// -----------------------------------------------------------------------------
+// Verification
+
+class VerifyOopsClosure: public OopClosure {
+  nmethod* _nm;
+  bool     _ok;
+public:
+  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
+  bool ok() { return _ok; }
+  virtual void do_oop(oop* p) {
+    if ((*p) == NULL || (*p)->is_oop())  return;
+    if (_ok) {
+      _nm->print_nmethod(true);
+      _ok = false;
+    }
+    tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+
+void nmethod::verify() {
+
+  // OSR methods can be deopted but not marked as zombie or not_entrant,
+  // which seems odd.
+
+  if (is_zombie() || is_not_entrant())
+    return;
+
+  // Make sure all the entry points are correctly aligned for patching.
+  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
+
+  // assert(method()->is_oop(), "must be valid");
+
+  ResourceMark rm;
+
+  if (!CodeCache::contains(this)) {
+    fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
+  }
+
+  if (is_native_method())
+    return;
+
+  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
+  if (nm != this) {
+    fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
+                  this));
+  }
+
+  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
+    if (! p->verify(this)) {
+      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
+    }
+  }
+
+  VerifyOopsClosure voc(this);
+  oops_do(&voc);
+  assert(voc.ok(), "embedded oops must be OK");
+  verify_scavenge_root_oops();
+
+  verify_scopes();
+}
+
+
+void nmethod::verify_interrupt_point(address call_site) {
+  // This code does not work in release mode since
+  // owns_lock is only available in debug mode.
+  CompiledIC* ic = NULL;
+  Thread *cur = Thread::current();
+  if (CompiledIC_lock->owner() == cur ||
+      ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
+       SafepointSynchronize::is_at_safepoint())) {
+    ic = CompiledIC_at(this, call_site);
+    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+  } else {
+    MutexLocker ml_verify (CompiledIC_lock);
+    ic = CompiledIC_at(this, call_site);
+  }
+  PcDesc* pd = pc_desc_at(ic->end_of_call());
+  assert(pd != NULL, "PcDesc must exist");
+  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
+                                     pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
+                                     pd->return_oop());
+       !sd->is_top(); sd = sd->sender()) {
+    sd->verify();
+  }
+}
+
+void nmethod::verify_scopes() {
+  if (!method()) return;        // Runtime stubs have no scope
+  if (method()->is_native()) return; // Ignore stub methods.
+  // Iterate through all interrupt points
+  // and verify that the debug information is valid.
+  RelocIterator iter((nmethod*)this);
+  while (iter.next()) {
+    address stub = NULL;
+    switch (iter.type()) {
+      case relocInfo::virtual_call_type:
+        verify_interrupt_point(iter.addr());
+        break;
+      case relocInfo::opt_virtual_call_type:
+        stub = iter.opt_virtual_call_reloc()->static_stub();
+        verify_interrupt_point(iter.addr());
+        break;
+      case relocInfo::static_call_type:
+        stub = iter.static_call_reloc()->static_stub();
+        //verify_interrupt_point(iter.addr());
+        break;
+      case relocInfo::runtime_call_type: {
+        address destination = iter.reloc()->value();
+        // Right now there is no way to find out which entries support
+        // an interrupt point.  It would be nice if we had this
+        // information in a table.
+        break;
+      }
+    }
+#ifndef GRAAL
+    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
+#endif
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Non-product code
+#ifndef PRODUCT
+
+class DebugScavengeRoot: public OopClosure {
+  nmethod* _nm;
+  bool     _ok;
+public:
+  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
+  bool ok() { return _ok; }
+  virtual void do_oop(oop* p) {
+    if ((*p) == NULL || !(*p)->is_scavengable())  return;
+    if (_ok) {
+      _nm->print_nmethod(true);
+      _ok = false;
+    }
+    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+    (*p)->print();
+  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+
+void nmethod::verify_scavenge_root_oops() {
+  if (!on_scavenge_root_list()) {
+    // Actually look inside, to verify the claim that it's clean.
+    DebugScavengeRoot debug_scavenge_root(this);
+    oops_do(&debug_scavenge_root);
+    if (!debug_scavenge_root.ok())
+      fatal("found an unadvertised bad scavengable oop in the code cache");
+  }
+  assert(scavenge_root_not_marked(), "");
+}
+
+#endif // PRODUCT
+
+// Printing operations
+
+void nmethod::print() const {
+  ResourceMark rm;
+  ttyLocker ttyl;   // keep the following output all in one block
+
+  tty->print("Compiled method ");
+
+  if (is_compiled_by_c1()) {
+    tty->print("(c1) ");
+  } else if (is_compiled_by_c2()) {
+    tty->print("(c2) ");
+  } else if (is_compiled_by_shark()) {
+    tty->print("(shark) ");
+  } else {
+    tty->print("(nm) ");
+  }
+
+  print_on(tty, NULL);
+
+  if (WizardMode) {
+    tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
+    tty->print(" for method " INTPTR_FORMAT , (address)method());
+    tty->print(" { ");
+    if (is_in_use())      tty->print("in_use ");
+    if (is_not_entrant()) tty->print("not_entrant ");
+    if (is_zombie())      tty->print("zombie ");
+    if (is_unloaded())    tty->print("unloaded ");
+    if (on_scavenge_root_list())  tty->print("scavenge_root ");
+    tty->print_cr("}:");
+  }
+  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              (address)this,
+                                              (address)this + size(),
+                                              size());
+  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              relocation_begin(),
+                                              relocation_end(),
+                                              relocation_size());
+  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              consts_begin(),
+                                              consts_end(),
+                                              consts_size());
+  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              insts_begin(),
+                                              insts_end(),
+                                              insts_size());
+  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              stub_begin(),
+                                              stub_end(),
+                                              stub_size());
+  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              oops_begin(),
+                                              oops_end(),
+                                              oops_size());
+  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              metadata_begin(),
+                                              metadata_end(),
+                                              metadata_size());
+  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              scopes_data_begin(),
+                                              scopes_data_end(),
+                                              scopes_data_size());
+  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              scopes_pcs_begin(),
+                                              scopes_pcs_end(),
+                                              scopes_pcs_size());
+  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              dependencies_begin(),
+                                              dependencies_end(),
+                                              dependencies_size());
+  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              handler_table_begin(),
+                                              handler_table_end(),
+                                              handler_table_size());
+  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              nul_chk_table_begin(),
+                                              nul_chk_table_end(),
+                                              nul_chk_table_size());
+}
+
+void nmethod::print_code() {
+  HandleMark hm;
+  ResourceMark m;
+  Disassembler::decode(this);
+}
+
+
+#ifndef PRODUCT
+
+void nmethod::print_scopes() {
+  // Find the first pc desc for all scopes in the code and print it.
+  ResourceMark rm;
+  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
+    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
+      continue;
+
+    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
+    sd->print_on(tty, p);
+  }
+}
+
+void nmethod::print_dependencies() {
+  ResourceMark rm;
+  ttyLocker ttyl;   // keep the following output all in one block
+  tty->print_cr("Dependencies:");
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    deps.print_dependency();
+    Klass* ctxk = deps.context_type();
+    if (ctxk != NULL) {
+      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
+        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
+      }
+    }
+    deps.log_dependency();  // put it into the xml log also
+  }
+}
+
+
+void nmethod::print_relocations() {
+  ResourceMark m;       // in case methods get printed via the debugger
+  tty->print_cr("relocations:");
+  RelocIterator iter(this);
+  iter.print();
+  if (UseRelocIndex) {
+    jint* index_end   = (jint*)relocation_end() - 1;
+    jint  index_size  = *index_end;
+    jint* index_start = (jint*)( (address)index_end - index_size );
+    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
+    if (index_size > 0) {
+      jint* ip;
+      for (ip = index_start; ip+2 <= index_end; ip += 2)
+        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
+                      ip[0],
+                      ip[1],
+                      header_end()+ip[0],
+                      relocation_begin()-1+ip[1]);
+      for (; ip < index_end; ip++)
+        tty->print_cr("  (%d ?)", ip[0]);
+      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
+      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
+    }
+  }
+}
+
+
+void nmethod::print_pcs() {
+  ResourceMark m;       // in case methods get printed via debugger
+  tty->print_cr("pc-bytecode offsets:");
+  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
+    p->print(this);
+  }
+}
+
+#endif // PRODUCT
+
+const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
+  RelocIterator iter(this, begin, end);
+  bool have_one = false;
+  while (iter.next()) {
+    have_one = true;
+    switch (iter.type()) {
+        case relocInfo::none:                  return "no_reloc";
+        case relocInfo::oop_type: {
+          stringStream st;
+          oop_Relocation* r = iter.oop_reloc();
+          oop obj = r->oop_value();
+          st.print("oop(");
+          if (obj == NULL) st.print("NULL");
+          else obj->print_value_on(&st);
+          st.print(")");
+          return st.as_string();
+        }
+        case relocInfo::metadata_type: {
+          stringStream st;
+          metadata_Relocation* r = iter.metadata_reloc();
+          Metadata* obj = r->metadata_value();
+          st.print("metadata(");
+          if (obj == NULL) st.print("NULL");
+          else obj->print_value_on(&st);
+          st.print(")");
+          return st.as_string();
+        }
+        case relocInfo::virtual_call_type:     return "virtual_call";
+        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
+        case relocInfo::static_call_type:      return "static_call";
+        case relocInfo::static_stub_type:      return "static_stub";
+        case relocInfo::runtime_call_type:     return "runtime_call";
+        case relocInfo::external_word_type:    return "external_word";
+        case relocInfo::internal_word_type:    return "internal_word";
+        case relocInfo::section_word_type:     return "section_word";
+        case relocInfo::poll_type:             return "poll";
+        case relocInfo::poll_return_type:      return "poll_return";
+        case relocInfo::type_mask:             return "type_bit_mask";
+    }
+  }
+  return have_one ? "other" : NULL;
+}
+
+// Return the last scope in (begin..end]
+ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
+  PcDesc* p = pc_desc_near(begin+1);
+  if (p != NULL && p->real_pc(this) <= end) {
+    return new ScopeDesc(this, p->scope_decode_offset(),
+                         p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(),
+                         p->return_oop());
+  }
+  return NULL;
+}
+
+void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
+  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
+  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
+  if (GRAAL_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
+  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
+  if (GRAAL_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
+
+  if (has_method_handle_invokes())
+    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
+
+  if (block_begin == consts_begin())            stream->print_cr("[Constants]");
+
+  if (block_begin == entry_point()) {
+    methodHandle m = method();
+    if (m.not_null()) {
+      stream->print("  # ");
+      m->print_value_on(stream);
+      stream->cr();
+    }
+    if (m.not_null() && !is_osr_method()) {
+      ResourceMark rm;
+      int sizeargs = m->size_of_parameters();
+      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
+      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
+      {
+        int sig_index = 0;
+        if (!m->is_static())
+          sig_bt[sig_index++] = T_OBJECT; // 'this'
+        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
+          BasicType t = ss.type();
+          sig_bt[sig_index++] = t;
+          if (type2size[t] == 2) {
+            sig_bt[sig_index++] = T_VOID;
+          } else {
+            assert(type2size[t] == 1, "size is 1 or 2");
+          }
+        }
+        assert(sig_index == sizeargs, "");
+      }
+      const char* spname = "sp"; // make arch-specific?
+      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
+      int stack_slot_offset = this->frame_size() * wordSize;
+      int tab1 = 14, tab2 = 24;
+      int sig_index = 0;
+      int arg_index = (m->is_static() ? 0 : -1);
+      bool did_old_sp = false;
+      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
+        bool at_this = (arg_index == -1);
+        bool at_old_sp = false;
+        BasicType t = (at_this ? T_OBJECT : ss.type());
+        assert(t == sig_bt[sig_index], "sigs in sync");
+        if (at_this)
+          stream->print("  # this: ");
+        else
+          stream->print("  # parm%d: ", arg_index);
+        stream->move_to(tab1);
+        VMReg fst = regs[sig_index].first();
+        VMReg snd = regs[sig_index].second();
+        if (fst->is_reg()) {
+          stream->print("%s", fst->name());
+          if (snd->is_valid())  {
+            stream->print(":%s", snd->name());
+          }
+        } else if (fst->is_stack()) {
+          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
+          if (offset == stack_slot_offset)  at_old_sp = true;
+          stream->print("[%s+0x%x]", spname, offset);
+        } else {
+          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
+        }
+        stream->print(" ");
+        stream->move_to(tab2);
+        stream->print("= ");
+        if (at_this) {
+          m->method_holder()->print_value_on(stream);
+        } else {
+          bool did_name = false;
+          if (!at_this && ss.is_object()) {
+            Symbol* name = ss.as_symbol_or_null();
+            if (name != NULL) {
+              name->print_value_on(stream);
+              did_name = true;
+            }
+          }
+          if (!did_name)
+            stream->print("%s", type2name(t));
+        }
+        if (at_old_sp) {
+          stream->print("  (%s of caller)", spname);
+          did_old_sp = true;
+        }
+        stream->cr();
+        sig_index += type2size[t];
+        arg_index += 1;
+        if (!at_this)  ss.next();
+      }
+      if (!did_old_sp) {
+        stream->print("  # ");
+        stream->move_to(tab1);
+        stream->print("[%s+0x%x]", spname, stack_slot_offset);
+        stream->print("  (%s of caller)", spname);
+        stream->cr();
+      }
+    }
+  }
+}
+
+void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
+  // First, find an oopmap in (begin, end].
+  // We use the odd half-closed interval so that oop maps and scope descs
+  // which are tied to the byte after a call are printed with the call itself.
+  address base = code_begin();
+  OopMapSet* oms = oop_maps();
+  if (oms != NULL) {
+    for (int i = 0, imax = oms->size(); i < imax; i++) {
+      OopMap* om = oms->at(i);
+      address pc = base + om->offset();
+      if (pc > begin) {
+        if (pc <= end) {
+          st->move_to(column);
+          st->print("; ");
+          om->print_on(st);
+        }
+        break;
+      }
+    }
+  }
+
+  // Print any debug info present at this pc.
+  ScopeDesc* sd  = scope_desc_in(begin, end);
+  if (sd != NULL) {
+    st->move_to(column);
+    if (sd->bci() == SynchronizationEntryBCI) {
+      st->print(";*synchronization entry");
+    } else {
+      if (sd->method() == NULL) {
+        st->print("method is NULL");
+      } else if (sd->method()->is_native()) {
+        st->print("method is native");
+      } else {
+        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
+        st->print(";*%s", Bytecodes::name(bc));
+        switch (bc) {
+        case Bytecodes::_invokevirtual:
+        case Bytecodes::_invokespecial:
+        case Bytecodes::_invokestatic:
+        case Bytecodes::_invokeinterface:
+          {
+            Bytecode_invoke invoke(sd->method(), sd->bci());
+            st->print(" ");
+            if (invoke.name() != NULL)
+              invoke.name()->print_symbol_on(st);
+            else
+              st->print("<UNKNOWN>");
+            break;
+          }
+        case Bytecodes::_getfield:
+        case Bytecodes::_putfield:
+        case Bytecodes::_getstatic:
+        case Bytecodes::_putstatic:
+          {
+            Bytecode_field field(sd->method(), sd->bci());
+            st->print(" ");
+            if (field.name() != NULL)
+              field.name()->print_symbol_on(st);
+            else
+              st->print("<UNKNOWN>");
+          }
+        }
+      }
+    }
+
+    // Print all scopes
+    for (;sd != NULL; sd = sd->sender()) {
+      st->move_to(column);
+      st->print("; -");
+      if (sd->method() == NULL) {
+        st->print("method is NULL");
+      } else {
+        sd->method()->print_short_name(st);
+      }
+      int lineno = sd->method()->line_number_from_bci(sd->bci());
+      if (lineno != -1) {
+        st->print("@%d (line %d)", sd->bci(), lineno);
+      } else {
+        st->print("@%d", sd->bci());
+      }
+      st->cr();
+    }
+  }
+
+  // Print relocation information
+  const char* str = reloc_string_for(begin, end);
+  if (str != NULL) {
+    if (sd != NULL) st->cr();
+    st->move_to(column);
+    st->print(";   {%s}", str);
+  }
+  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
+  if (cont_offset != 0) {
+    st->move_to(column);
+    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
+  }
+
+}
+
+#ifndef PRODUCT
+
+void nmethod::print_value_on(outputStream* st) const {
+  st->print("nmethod");
+  print_on(st, NULL);
+}
+
+void nmethod::print_calls(outputStream* st) {
+  RelocIterator iter(this);
+  while (iter.next()) {
+    switch (iter.type()) {
+    case relocInfo::virtual_call_type:
+    case relocInfo::opt_virtual_call_type: {
+      VerifyMutexLocker mc(CompiledIC_lock);
+      CompiledIC_at(iter.reloc())->print();
+      break;
+    }
+    case relocInfo::static_call_type:
+      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
+      compiledStaticCall_at(iter.reloc())->print();
+      break;
+    }
+  }
+}
+
+void nmethod::print_handler_table() {
+  ExceptionHandlerTable(this).print();
+}
+
+void nmethod::print_nul_chk_table() {
+  ImplicitExceptionTable(this).print(code_begin());
+}
+
+#endif // PRODUCT
+
+void nmethod::print_statistics() {
+  ttyLocker ttyl;
+  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
+  nmethod_stats.print_native_nmethod_stats();
+  nmethod_stats.print_nmethod_stats();
+  DebugInformationRecorder::print_statistics();
+  nmethod_stats.print_pc_stats();
+  Dependencies::print_statistics();
+  if (xtty != NULL)  xtty->tail("statistics");
+}
+
+#ifdef GRAAL
+void DebugScopedNMethod::print_on(outputStream* st) {
+  if (_nm != NULL) {
+    st->print("nmethod@%p", _nm);
+    Method* method = _nm->method();
+    if (method != NULL) {
+      char holder[O_BUFLEN];
+      char nameAndSig[O_BUFLEN];
+      method->method_holder()->name()->as_C_string(holder, O_BUFLEN);
+      method->name_and_sig_as_C_string(nameAndSig, O_BUFLEN);
+      st->print(" - %s::%s", holder, nameAndSig);
+    }
+  }
+}
+#endif
--- a/src/share/vm/code/stubs.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/code/stubs.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -254,10 +254,9 @@
   guarantee(_queue_begin != _queue_end || n == 0, "buffer indices must be the same");
 }
 
-
-void StubQueue::print() {
+void StubQueue::print_on(outputStream* st) {
   MutexLockerEx lock(_mutex);
   for (Stub* s = first(); s != NULL; s = next(s)) {
-    stub_print(s);
+    stub_print(s, st);
   }
 }
--- a/src/share/vm/code/stubs.hpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/code/stubs.hpp	Tue Nov 27 12:12:02 2012 +0100
@@ -120,7 +120,7 @@
 
   // Debugging
   virtual void    verify(Stub* self)                       = 0; // verifies the stub
-  virtual void    print(Stub* self)                        = 0; // prints information about the stub
+  virtual void    print_on(Stub* self, outputStream* st)   = 0; // prints information about the stub
 };
 
 
@@ -149,7 +149,7 @@
                                                            \
     /* Debugging */                                        \
     virtual void    verify(Stub* self)                     { cast(self)->verify(); }               \
-    virtual void    print(Stub* self)                      { cast(self)->print(); }                \
+    virtual void    print_on(Stub* self, outputStream* st) { cast(self)->print_on(st); }           \
   };
 
 
@@ -182,7 +182,7 @@
   bool  stub_contains(Stub* s, address pc) const { return _stub_interface->code_begin(s) <= pc && pc < _stub_interface->code_end(s); }
   int   stub_code_size_to_size(int code_size) const { return _stub_interface->code_size_to_size(code_size); }
   void  stub_verify(Stub* s)                     { _stub_interface->verify(s); }
-  void  stub_print(Stub* s)                      { _stub_interface->print(s); }
+  void  stub_print(Stub* s, outputStream* st)    { _stub_interface->print_on(s, st); }
 
   static void register_queue(StubQueue*);
 
@@ -227,7 +227,8 @@
 
   // Debugging/printing
   void  verify();                                // verifies the stub queue
-  void  print();                                 // prints information about the stub queue
+  void  print()                                  { print_on(tty); }
+  void  print_on(outputStream* st);
 };
 
 #endif // SHARE_VM_CODE_STUBS_HPP
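
The two stubs hunks above follow the usual HotSpot printing convention: the stream-taking print_on(outputStream*) does the work, and print() is a thin wrapper bound to the default stream (tty). A self-contained sketch of the same convention in standard C++, with std::ostream standing in for outputStream and std::cout for tty:

    #include <iostream>
    #include <ostream>

    class Example {
     public:
      // The stream-taking variant does the actual printing ...
      void print_on(std::ostream& st) const { st << "Example\n"; }
      // ... and print() just forwards to the default stream.
      void print() const { print_on(std::cout); }
    };

    int main() {
      Example e;
      e.print();              // default stream
      e.print_on(std::cerr);  // any caller-chosen stream
      return 0;
    }
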
--- a/src/share/vm/compiler/disassembler.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/compiler/disassembler.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -1,550 +1,555 @@
-/*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/javaClasses.hpp"
-#include "code/codeCache.hpp"
-#include "compiler/disassembler.hpp"
-#include "gc_interface/collectedHeap.hpp"
-#include "memory/cardTableModRefBS.hpp"
-#include "runtime/fprofiler.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/stubCodeGenerator.hpp"
-#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_ARCH_x86
-# include "depChecker_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "depChecker_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "depChecker_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "depChecker_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "depChecker_ppc.hpp"
-#endif
-#ifdef SHARK
-#include "shark/sharkEntry.hpp"
-#endif
-
-void*       Disassembler::_library               = NULL;
-bool        Disassembler::_tried_to_load_library = false;
-
-// This routine is in the shared library:
-Disassembler::decode_func_virtual Disassembler::_decode_instructions_virtual = NULL;
-Disassembler::decode_func Disassembler::_decode_instructions = NULL;
-
-static const char hsdis_library_name[] = "hsdis-"HOTSPOT_LIB_ARCH;
-static const char decode_instructions_virtual_name[] = "decode_instructions_virtual";
-static const char decode_instructions_name[] = "decode_instructions";
-static bool use_new_version = true;
-#define COMMENT_COLUMN  40 LP64_ONLY(+8) /*could be an option*/
-#define BYTES_COMMENT   ";..."  /* funky byte display comment */
-
-bool Disassembler::load_library() {
-  if (_decode_instructions_virtual != NULL || _decode_instructions != NULL) {
-    // Already succeeded.
-    return true;
-  }
-  if (_tried_to_load_library) {
-    // Do not try twice.
-    // To force retry in debugger: assign _tried_to_load_library=0
-    return false;
-  }
-  // Try to load it.
-  char ebuf[1024];
-  char buf[JVM_MAXPATHLEN];
-  os::jvm_path(buf, sizeof(buf));
-  int jvm_offset = -1;
-  int lib_offset = -1;
-  {
-    // Match "jvm[^/]*" in jvm_path.
-    const char* base = buf;
-    const char* p = strrchr(buf, '/');
-    if (p != NULL) lib_offset = p - base + 1;
-    p = strstr(p ? p : base, "jvm");
-    if (p != NULL)  jvm_offset = p - base;
-  }
-  // Find the disassembler shared library.
-  // Search for several paths derived from libjvm, in this order:
-  // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so  (for compatibility)
-  // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
-  // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
-  // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
-  if (jvm_offset >= 0) {
-    // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
-    strcpy(&buf[jvm_offset], hsdis_library_name);
-    strcat(&buf[jvm_offset], os::dll_file_extension());
-    _library = os::dll_load(buf, ebuf, sizeof ebuf);
-    if (_library == NULL) {
-      // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
-      strcpy(&buf[lib_offset], hsdis_library_name);
-      strcat(&buf[lib_offset], os::dll_file_extension());
-      _library = os::dll_load(buf, ebuf, sizeof ebuf);
-    }
-    if (_library == NULL) {
-      // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
-      buf[lib_offset - 1] = '\0';
-      const char* p = strrchr(buf, '/');
-      if (p != NULL) {
-        lib_offset = p - buf + 1;
-        strcpy(&buf[lib_offset], hsdis_library_name);
-        strcat(&buf[lib_offset], os::dll_file_extension());
-        _library = os::dll_load(buf, ebuf, sizeof ebuf);
-      }
-    }
-  }
-  if (_library == NULL) {
-    // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
-    strcpy(&buf[0], hsdis_library_name);
-    strcat(&buf[0], os::dll_file_extension());
-    _library = os::dll_load(buf, ebuf, sizeof ebuf);
-  }
-  if (_library != NULL) {
-    _decode_instructions_virtual = CAST_TO_FN_PTR(Disassembler::decode_func_virtual,
-                                          os::dll_lookup(_library, decode_instructions_virtual_name));
-  }
-  if (_decode_instructions_virtual == NULL) {
-    // could not spot in new version, try old version
-    _decode_instructions = CAST_TO_FN_PTR(Disassembler::decode_func,
-                                          os::dll_lookup(_library, decode_instructions_name));
-    use_new_version = false;
-  } else {
-    use_new_version = true;
-  }
-  _tried_to_load_library = true;
-  if (_decode_instructions_virtual == NULL && _decode_instructions == NULL) {
-    tty->print_cr("Could not load %s; %s; %s", buf,
-                  ((_library != NULL)
-                   ? "entry point is missing"
-                   : (WizardMode || PrintMiscellaneous)
-                   ? (const char*)ebuf
-                   : "library not loadable"),
-                  "PrintAssembly is disabled");
-    return false;
-  }
-
-  // Success.
-  tty->print_cr("Loaded disassembler from %s", buf);
-  return true;
-}
-
-
-class decode_env {
- private:
-  nmethod*      _nm;
-  CodeBlob*     _code;
-  CodeComments  _comments;
-  outputStream* _output;
-  address       _start, _end;
-
-  char          _option_buf[512];
-  char          _print_raw;
-  bool          _print_pc;
-  bool          _print_bytes;
-  address       _cur_insn;
-  int           _total_ticks;
-  int           _bytes_per_line; // arch-specific formatting option
-
-  static bool match(const char* event, const char* tag) {
-    size_t taglen = strlen(tag);
-    if (strncmp(event, tag, taglen) != 0)
-      return false;
-    char delim = event[taglen];
-    return delim == '\0' || delim == ' ' || delim == '/' || delim == '=';
-  }
-
-  void collect_options(const char* p) {
-    if (p == NULL || p[0] == '\0')  return;
-    size_t opt_so_far = strlen(_option_buf);
-    if (opt_so_far + 1 + strlen(p) + 1 > sizeof(_option_buf))  return;
-    char* fillp = &_option_buf[opt_so_far];
-    if (opt_so_far > 0) *fillp++ = ',';
-    strcat(fillp, p);
-    // replace white space by commas:
-    char* q = fillp;
-    while ((q = strpbrk(q, " \t\n")) != NULL)
-      *q++ = ',';
-    // Note that multiple PrintAssemblyOptions flags accumulate with \n,
-    // which we want to be changed to a comma...
-  }
-
-  void print_insn_labels();
-  void print_insn_bytes(address pc0, address pc);
-  void print_address(address value);
-
- public:
-  decode_env(CodeBlob* code, outputStream* output, CodeComments c = CodeComments());
-
-  address decode_instructions(address start, address end);
-
-  void start_insn(address pc) {
-    _cur_insn = pc;
-    output()->bol();
-    print_insn_labels();
-  }
-
-  void end_insn(address pc) {
-    address pc0 = cur_insn();
-    outputStream* st = output();
-    if (_print_bytes && pc > pc0)
-      print_insn_bytes(pc0, pc);
-    if (_nm != NULL) {
-      _nm->print_code_comment_on(st, COMMENT_COLUMN, pc0, pc);
-      // this calls reloc_string_for which calls oop::print_value_on
-    }
-
-    // Output pc bucket ticks if we have any
-    if (total_ticks() != 0) {
-      address bucket_pc = FlatProfiler::bucket_start_for(pc);
-      if (bucket_pc != NULL && bucket_pc > pc0 && bucket_pc <= pc) {
-        int bucket_count = FlatProfiler::bucket_count_for(pc0);
-        if (bucket_count != 0) {
-          st->bol();
-          st->print_cr("%3.1f%% [%d]", bucket_count*100.0/total_ticks(), bucket_count);
-        }
-      }
-    }
-    // follow each complete insn by a nice newline
-    st->cr();
-  }
-
-  address handle_event(const char* event, address arg);
-
-  outputStream* output() { return _output; }
-  address cur_insn() { return _cur_insn; }
-  int total_ticks() { return _total_ticks; }
-  void set_total_ticks(int n) { _total_ticks = n; }
-  const char* options() { return _option_buf; }
-};
-
-decode_env::decode_env(CodeBlob* code, outputStream* output, CodeComments c) {
-  memset(this, 0, sizeof(*this));
-  _output = output ? output : tty;
-  _code = code;
-  if (code != NULL && code->is_nmethod())
-    _nm = (nmethod*) code;
-  _comments.assign(c);
-
-  // by default, output pc but not bytes:
-  _print_pc       = true;
-  _print_bytes    = false;
-  _bytes_per_line = Disassembler::pd_instruction_alignment();
-
-  // parse the global option string:
-  collect_options(Disassembler::pd_cpu_opts());
-  collect_options(PrintAssemblyOptions);
-
-  if (strstr(options(), "hsdis-")) {
-    if (strstr(options(), "hsdis-print-raw"))
-      _print_raw = (strstr(options(), "xml") ? 2 : 1);
-    if (strstr(options(), "hsdis-print-pc"))
-      _print_pc = !_print_pc;
-    if (strstr(options(), "hsdis-print-bytes"))
-      _print_bytes = !_print_bytes;
-  }
-  if (strstr(options(), "help")) {
-    tty->print_cr("PrintAssemblyOptions help:");
-    tty->print_cr("  hsdis-print-raw       test plugin by requesting raw output");
-    tty->print_cr("  hsdis-print-raw-xml   test plugin by requesting raw xml");
-    tty->print_cr("  hsdis-print-pc        turn off PC printing (on by default)");
-    tty->print_cr("  hsdis-print-bytes     turn on instruction byte output");
-    tty->print_cr("combined options: %s", options());
-  }
-}
-
-address decode_env::handle_event(const char* event, address arg) {
-  if (match(event, "insn")) {
-    start_insn(arg);
-  } else if (match(event, "/insn")) {
-    end_insn(arg);
-  } else if (match(event, "addr")) {
-    if (arg != NULL) {
-      print_address(arg);
-      return arg;
-    }
-  } else if (match(event, "mach")) {
-    static char buffer[32] = { 0, };
-    if (strcmp(buffer, (const char*)arg) != 0 ||
-        strlen((const char*)arg) > sizeof(buffer) - 1) {
-      // Only print this when the mach changes
-      strncpy(buffer, (const char*)arg, sizeof(buffer) - 1);
-      output()->print_cr("[Disassembling for mach='%s']", arg);
-    }
-  } else if (match(event, "format bytes-per-line")) {
-    _bytes_per_line = (int) (intptr_t) arg;
-  } else {
-    // ignore unrecognized markup
-  }
-  return NULL;
-}
-
-// called by the disassembler to print out jump targets and data addresses
-void decode_env::print_address(address adr) {
-  outputStream* st = _output;
-
-  if (adr == NULL) {
-    st->print("NULL");
-    return;
-  }
-
-  int small_num = (int)(intptr_t)adr;
-  if ((intptr_t)adr == (intptr_t)small_num
-      && -1 <= small_num && small_num <= 9) {
-    st->print("%d", small_num);
-    return;
-  }
-
-  if (Universe::is_fully_initialized()) {
-    if (StubRoutines::contains(adr)) {
-      StubCodeDesc* desc = StubCodeDesc::desc_for(adr);
-      if (desc == NULL)
-        desc = StubCodeDesc::desc_for(adr + frame::pc_return_offset);
-      if (desc != NULL) {
-        st->print("Stub::%s", desc->name());
-        if (desc->begin() != adr)
-          st->print("%+d 0x%p",adr - desc->begin(), adr);
-        else if (WizardMode) st->print(" " PTR_FORMAT, adr);
-        return;
-      }
-      st->print("Stub::<unknown> " PTR_FORMAT, adr);
-      return;
-    }
-
-    BarrierSet* bs = Universe::heap()->barrier_set();
-    if (bs->kind() == BarrierSet::CardTableModRef &&
-        adr == (address)((CardTableModRefBS*)(bs))->byte_map_base) {
-      st->print("word_map_base");
-      if (WizardMode) st->print(" " INTPTR_FORMAT, (intptr_t)adr);
-      return;
-    }
-
-    oop obj;
-    if (_nm != NULL
-        && (obj = _nm->embeddedOop_at(cur_insn())) != NULL
-        && (address) obj == adr
-        && Universe::heap()->is_in(obj)
-        && Universe::heap()->is_in(obj->klass())) {
-      julong c = st->count();
-      obj->print_value_on(st);
-      if (st->count() == c) {
-        // No output.  (Can happen in product builds.)
-        st->print("(a %s)", obj->klass()->external_name());
-      }
-      return;
-    }
-  }
-
-  // Fall through to a simple (hexadecimal) numeral.
-  st->print(PTR_FORMAT, adr);
-}
-
-void decode_env::print_insn_labels() {
-  address p = cur_insn();
-  outputStream* st = output();
-  CodeBlob* cb = _code;
-  if (cb != NULL) {
-    cb->print_block_comment(st, p);
-  }
-  _comments.print_block_comment(st, (intptr_t)(p - _start));
-  if (_print_pc) {
-    st->print("  " PTR_FORMAT ": ", p);
-  }
-}
-
-void decode_env::print_insn_bytes(address pc, address pc_limit) {
-  outputStream* st = output();
-  size_t incr = 1;
-  size_t perline = _bytes_per_line;
-  if ((size_t) Disassembler::pd_instruction_alignment() >= sizeof(int)
-      && !((uintptr_t)pc % sizeof(int))
-      && !((uintptr_t)pc_limit % sizeof(int))) {
-    incr = sizeof(int);
-    if (perline % incr)  perline += incr - (perline % incr);
-  }
-  while (pc < pc_limit) {
-    // tab to the desired column:
-    st->move_to(COMMENT_COLUMN);
-    address pc0 = pc;
-    address pc1 = pc + perline;
-    if (pc1 > pc_limit)  pc1 = pc_limit;
-    for (; pc < pc1; pc += incr) {
-      if (pc == pc0)
-        st->print(BYTES_COMMENT);
-      else if ((uint)(pc - pc0) % sizeof(int) == 0)
-        st->print(" ");         // put out a space on word boundaries
-      if (incr == sizeof(int))
-            st->print("%08lx", *(int*)pc);
-      else  st->print("%02x",   (*pc)&0xFF);
-    }
-    st->cr();
-  }
-}
-
-
-static void* event_to_env(void* env_pv, const char* event, void* arg) {
-  decode_env* env = (decode_env*) env_pv;
-  return env->handle_event(event, (address) arg);
-}
-
-static int printf_to_env(void* env_pv, const char* format, ...) {
-  decode_env* env = (decode_env*) env_pv;
-  outputStream* st = env->output();
-  size_t flen = strlen(format);
-  const char* raw = NULL;
-  if (flen == 0)  return 0;
-  if (flen == 1 && format[0] == '\n') { st->bol(); return 1; }
-  if (flen < 2 ||
-      strchr(format, '%') == NULL) {
-    raw = format;
-  } else if (format[0] == '%' && format[1] == '%' &&
-             strchr(format+2, '%') == NULL) {
-    // happens a lot on machines with names like %foo
-    flen--;
-    raw = format+1;
-  }
-  if (raw != NULL) {
-    st->print_raw(raw, (int) flen);
-    return (int) flen;
-  }
-  va_list ap;
-  va_start(ap, format);
-  julong cnt0 = st->count();
-  st->vprint(format, ap);
-  julong cnt1 = st->count();
-  va_end(ap);
-  return (int)(cnt1 - cnt0);
-}
-
-address decode_env::decode_instructions(address start, address end) {
-  _start = start; _end = end;
-
-  assert(((((intptr_t)start | (intptr_t)end) % Disassembler::pd_instruction_alignment()) == 0), "misaligned insn addr");
-
-  const int show_bytes = false; // for disassembler debugging
-
-  //_version = Disassembler::pd_cpu_version();
-
-  if (!Disassembler::can_decode()) {
-    return NULL;
-  }
-
-  // decode a series of instructions and return the end of the last instruction
-
-  if (_print_raw) {
-    // Print whatever the library wants to print, w/o fancy callbacks.
-    // This is mainly for debugging the library itself.
-    FILE* out = stdout;
-    FILE* xmlout = (_print_raw > 1 ? out : NULL);
-    return use_new_version ?
-      (address)
-      (*Disassembler::_decode_instructions_virtual)((uintptr_t)start, (uintptr_t)end,
-                                                    start, end - start,
-                                                    NULL, (void*) xmlout,
-                                                    NULL, (void*) out,
-                                                    options(), 0/*nice new line*/)
-      :
-      (address)
-      (*Disassembler::_decode_instructions)(start, end,
-                                            NULL, (void*) xmlout,
-                                            NULL, (void*) out,
-                                            options());
-  }
-
-  return use_new_version ?
-    (address)
-    (*Disassembler::_decode_instructions_virtual)((uintptr_t)start, (uintptr_t)end,
-                                                  start, end - start,
-                                                  &event_to_env,  (void*) this,
-                                                  &printf_to_env, (void*) this,
-                                                  options(), 0/*nice new line*/)
-    :
-    (address)
-    (*Disassembler::_decode_instructions)(start, end,
-                                          &event_to_env,  (void*) this,
-                                          &printf_to_env, (void*) this,
-                                          options());
-}
-
-
-void Disassembler::decode(CodeBlob* cb, outputStream* st) {
-  if (!load_library())  return;
-  decode_env env(cb, st);
-  env.output()->print_cr("Decoding CodeBlob " PTR_FORMAT, cb);
-  env.decode_instructions(cb->code_begin(), cb->code_end());
-}
-
-void Disassembler::decode(address start, address end, outputStream* st, CodeComments c) {
-  if (!load_library())  return;
-  decode_env env(CodeCache::find_blob_unsafe(start), st, c);
-  env.decode_instructions(start, end);
-}
-
-void Disassembler::decode(nmethod* nm, outputStream* st) {
-  if (!load_library())  return;
-  decode_env env(nm, st);
-  env.output()->print_cr("Decoding compiled method " PTR_FORMAT ":", nm);
-  env.output()->print_cr("Code:");
-
-#ifdef SHARK
-  SharkEntry* entry = (SharkEntry *) nm->code_begin();
-  unsigned char* p   = entry->code_start();
-  unsigned char* end = entry->code_limit();
-#else
-  unsigned char* p   = nm->code_begin();
-  unsigned char* end = nm->code_end();
-#endif // SHARK
-
-  // If there has been profiling, print the buckets.
-  if (FlatProfiler::bucket_start_for(p) != NULL) {
-    unsigned char* p1 = p;
-    int total_bucket_count = 0;
-    while (p1 < end) {
-      unsigned char* p0 = p1;
-      p1 += pd_instruction_alignment();
-      address bucket_pc = FlatProfiler::bucket_start_for(p1);
-      if (bucket_pc != NULL && bucket_pc > p0 && bucket_pc <= p1)
-        total_bucket_count += FlatProfiler::bucket_count_for(p0);
-    }
-    env.set_total_ticks(total_bucket_count);
-  }
-
-  // Print constant table.
-  if (nm->consts_size() > 0) {
-    nm->print_nmethod_labels(env.output(), nm->consts_begin());
-    int offset = 0;
-    for (address p = nm->consts_begin(); p < nm->consts_end(); p += 4, offset += 4) {
-      if ((offset % 8) == 0) {
-        env.output()->print_cr("  " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT "   " PTR64_FORMAT, p, offset, *((int32_t*) p), *((int64_t*) p));
-      } else {
-        env.output()->print_cr("  " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT,                    p, offset, *((int32_t*) p));
-      }
-    }
-  }
-
-  env.decode_instructions(p, end);
-}
+/*
+ * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "code/codeCache.hpp"
+#include "compiler/disassembler.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "runtime/fprofiler.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "runtime/stubRoutines.hpp"
+#ifdef TARGET_ARCH_x86
+# include "depChecker_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "depChecker_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "depChecker_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "depChecker_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "depChecker_ppc.hpp"
+#endif
+#ifdef SHARK
+#include "shark/sharkEntry.hpp"
+#endif
+
+void*       Disassembler::_library               = NULL;
+bool        Disassembler::_tried_to_load_library = false;
+
+// This routine is in the shared library:
+Disassembler::decode_func_virtual Disassembler::_decode_instructions_virtual = NULL;
+Disassembler::decode_func Disassembler::_decode_instructions = NULL;
+
+static const char hsdis_library_name[] = "hsdis-"HOTSPOT_LIB_ARCH;
+static const char decode_instructions_virtual_name[] = "decode_instructions_virtual";
+static const char decode_instructions_name[] = "decode_instructions";
+static bool use_new_version = true;
+#define COMMENT_COLUMN  40 LP64_ONLY(+8) /*could be an option*/
+#define BYTES_COMMENT   ";..."  /* funky byte display comment */
+
+bool Disassembler::load_library() {
+  if (_decode_instructions_virtual != NULL || _decode_instructions != NULL) {
+    // Already succeeded.
+    return true;
+  }
+  if (_tried_to_load_library) {
+    // Do not try twice.
+    // To force retry in debugger: assign _tried_to_load_library=0
+    return false;
+  }
+  // Try to load it.
+  char ebuf[1024];
+  char buf[JVM_MAXPATHLEN];
+  os::jvm_path(buf, sizeof(buf));
+  int jvm_offset = -1;
+  int lib_offset = -1;
+  {
+    // Match "jvm[^/]*" in jvm_path.
+    const char* base = buf;
+    const char* p = strrchr(buf, '/');
+    if (p != NULL) lib_offset = p - base + 1;
+    p = strstr(p ? p : base, "jvm");
+    if (p != NULL)  jvm_offset = p - base;
+  }
+  // Find the disassembler shared library.
+  // Search for several paths derived from libjvm, in this order:
+  // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so  (for compatibility)
+  // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+  // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+  // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
+  if (jvm_offset >= 0) {
+    // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
+    strcpy(&buf[jvm_offset], hsdis_library_name);
+    strcat(&buf[jvm_offset], os::dll_file_extension());
+    _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    if (_library == NULL) {
+      // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+      strcpy(&buf[lib_offset], hsdis_library_name);
+      strcat(&buf[lib_offset], os::dll_file_extension());
+      _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    }
+    if (_library == NULL) {
+      // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+      buf[lib_offset - 1] = '\0';
+      const char* p = strrchr(buf, '/');
+      if (p != NULL) {
+        lib_offset = p - buf + 1;
+        strcpy(&buf[lib_offset], hsdis_library_name);
+        strcat(&buf[lib_offset], os::dll_file_extension());
+        _library = os::dll_load(buf, ebuf, sizeof ebuf);
+      }
+    }
+  }
+  if (_library == NULL) {
+    // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
+    strcpy(&buf[0], hsdis_library_name);
+    strcat(&buf[0], os::dll_file_extension());
+    _library = os::dll_load(buf, ebuf, sizeof ebuf);
+  }
+  if (_library != NULL) {
+    _decode_instructions_virtual = CAST_TO_FN_PTR(Disassembler::decode_func_virtual,
+                                          os::dll_lookup(_library, decode_instructions_virtual_name));
+  }
+  if (_decode_instructions_virtual == NULL) {
+    // could not find the new entry point; fall back to the old version
+    _decode_instructions = CAST_TO_FN_PTR(Disassembler::decode_func,
+                                          os::dll_lookup(_library, decode_instructions_name));
+    use_new_version = false;
+  } else {
+    use_new_version = true;
+  }
+  _tried_to_load_library = true;
+  if (_decode_instructions_virtual == NULL && _decode_instructions == NULL) {
+    tty->print_cr("Could not load %s; %s; %s", buf,
+                  ((_library != NULL)
+                   ? "entry point is missing"
+                   : (WizardMode || PrintMiscellaneous)
+                   ? (const char*)ebuf
+                   : "library not loadable"),
+                  "PrintAssembly is disabled");
+    return false;
+  }
+
+  // Success.
+  tty->print_cr("Loaded disassembler from %s", buf);
+  return true;
+}
+
+
+class decode_env {
+ private:
+  nmethod*      _nm;
+  CodeBlob*     _code;
+  CodeComments  _comments;
+  outputStream* _output;
+  address       _start, _end;
+
+  char          _option_buf[512];
+  char          _print_raw;
+  bool          _print_pc;
+  bool          _print_bytes;
+  address       _cur_insn;
+  int           _total_ticks;
+  int           _bytes_per_line; // arch-specific formatting option
+
+  static bool match(const char* event, const char* tag) {
+    size_t taglen = strlen(tag);
+    if (strncmp(event, tag, taglen) != 0)
+      return false;
+    char delim = event[taglen];
+    return delim == '\0' || delim == ' ' || delim == '/' || delim == '=';
+  }
+
+  void collect_options(const char* p) {
+    if (p == NULL || p[0] == '\0')  return;
+    size_t opt_so_far = strlen(_option_buf);
+    if (opt_so_far + 1 + strlen(p) + 1 > sizeof(_option_buf))  return;
+    char* fillp = &_option_buf[opt_so_far];
+    if (opt_so_far > 0) *fillp++ = ',';
+    strcat(fillp, p);
+    // replace whitespace with commas:
+    char* q = fillp;
+    while ((q = strpbrk(q, " \t\n")) != NULL)
+      *q++ = ',';
+    // Note that multiple PrintAssemblyOptions flags accumulate with '\n'
+    // separators, which the loop above rewrites to commas.
+  }
+
+  void print_insn_labels();
+  void print_insn_bytes(address pc0, address pc);
+  void print_address(address value);
+
+ public:
+  decode_env(CodeBlob* code, outputStream* output, CodeComments c = CodeComments());
+
+  address decode_instructions(address start, address end);
+
+  void start_insn(address pc) {
+    _cur_insn = pc;
+    output()->bol();
+    print_insn_labels();
+  }
+
+  void end_insn(address pc) {
+    address pc0 = cur_insn();
+    outputStream* st = output();
+    if (_print_bytes && pc > pc0)
+      print_insn_bytes(pc0, pc);
+    if (_nm != NULL) {
+      _nm->print_code_comment_on(st, COMMENT_COLUMN, pc0, pc);
+      // this calls reloc_string_for which calls oop::print_value_on
+    }
+
+    // Output pc bucket ticks if we have any
+    if (total_ticks() != 0) {
+      address bucket_pc = FlatProfiler::bucket_start_for(pc);
+      if (bucket_pc != NULL && bucket_pc > pc0 && bucket_pc <= pc) {
+        int bucket_count = FlatProfiler::bucket_count_for(pc0);
+        if (bucket_count != 0) {
+          st->bol();
+          st->print_cr("%3.1f%% [%d]", bucket_count*100.0/total_ticks(), bucket_count);
+        }
+      }
+    }
+    // follow each complete insn by a nice newline
+    st->cr();
+  }
+
+  address handle_event(const char* event, address arg);
+
+  outputStream* output() { return _output; }
+  address cur_insn() { return _cur_insn; }
+  int total_ticks() { return _total_ticks; }
+  void set_total_ticks(int n) { _total_ticks = n; }
+  const char* options() { return _option_buf; }
+};
+
+decode_env::decode_env(CodeBlob* code, outputStream* output, CodeComments c) {
+  memset(this, 0, sizeof(*this));
+  _output = output ? output : tty;
+  _code = code;
+  if (code != NULL && code->is_nmethod())
+    _nm = (nmethod*) code;
+  _comments.assign(c);
+
+  // by default, output pc but not bytes:
+  _print_pc       = true;
+  _print_bytes    = false;
+  _bytes_per_line = Disassembler::pd_instruction_alignment();
+
+  // parse the global option string:
+  collect_options(Disassembler::pd_cpu_opts());
+  collect_options(PrintAssemblyOptions);
+
+  if (strstr(options(), "hsdis-")) {
+    if (strstr(options(), "hsdis-print-raw"))
+      _print_raw = (strstr(options(), "xml") ? 2 : 1);
+    if (strstr(options(), "hsdis-print-pc"))
+      _print_pc = !_print_pc;
+    if (strstr(options(), "hsdis-print-bytes"))
+      _print_bytes = !_print_bytes;
+  }
+  if (strstr(options(), "help")) {
+    tty->print_cr("PrintAssemblyOptions help:");
+    tty->print_cr("  hsdis-print-raw       test plugin by requesting raw output");
+    tty->print_cr("  hsdis-print-raw-xml   test plugin by requesting raw xml");
+    tty->print_cr("  hsdis-print-pc        turn off PC printing (on by default)");
+    tty->print_cr("  hsdis-print-bytes     turn on instruction byte output");
+    tty->print_cr("combined options: %s", options());
+  }
+}
+
+address decode_env::handle_event(const char* event, address arg) {
+  if (match(event, "insn")) {
+    start_insn(arg);
+  } else if (match(event, "/insn")) {
+    end_insn(arg);
+  } else if (match(event, "addr")) {
+    if (arg != NULL) {
+      print_address(arg);
+      return arg;
+    }
+  } else if (match(event, "mach")) {
+    static char buffer[32] = { 0, };
+    if (strcmp(buffer, (const char*)arg) != 0 ||
+        strlen((const char*)arg) > sizeof(buffer) - 1) {
+      // Only print this when the mach changes
+      strncpy(buffer, (const char*)arg, sizeof(buffer) - 1);
+      output()->print_cr("[Disassembling for mach='%s']", arg);
+    }
+  } else if (match(event, "format bytes-per-line")) {
+    _bytes_per_line = (int) (intptr_t) arg;
+  } else {
+    // ignore unrecognized markup
+  }
+  return NULL;
+}
+
+// called by the disassembler to print out jump targets and data addresses
+void decode_env::print_address(address adr) {
+  outputStream* st = _output;
+
+  if (adr == NULL) {
+    st->print("NULL");
+    return;
+  }
+
+  int small_num = (int)(intptr_t)adr;
+  if ((intptr_t)adr == (intptr_t)small_num
+      && -1 <= small_num && small_num <= 9) {
+    st->print("%d", small_num);
+    return;
+  }
+
+  if (Universe::is_fully_initialized()) {
+    if (StubRoutines::contains(adr)) {
+      StubCodeDesc* desc = StubCodeDesc::desc_for(adr);
+      if (desc == NULL)
+        desc = StubCodeDesc::desc_for(adr + frame::pc_return_offset);
+      if (desc != NULL) {
+        st->print("Stub::%s", desc->name());
+        if (desc->begin() != adr)
+          st->print("%+d 0x%p",adr - desc->begin(), adr);
+        else if (WizardMode) st->print(" " PTR_FORMAT, adr);
+        return;
+      }
+      st->print("Stub::<unknown> " PTR_FORMAT, adr);
+      return;
+    }
+
+    BarrierSet* bs = Universe::heap()->barrier_set();
+    if (bs->kind() == BarrierSet::CardTableModRef &&
+        adr == (address)((CardTableModRefBS*)(bs))->byte_map_base) {
+      st->print("word_map_base");
+      if (WizardMode) st->print(" " INTPTR_FORMAT, (intptr_t)adr);
+      return;
+    }
+
+    oop obj;
+    if (_nm != NULL
+        && (obj = _nm->embeddedOop_at(cur_insn())) != NULL
+        && (address) obj == adr
+        && Universe::heap()->is_in(obj)
+        && Universe::heap()->is_in(obj->klass())) {
+      julong c = st->count();
+      obj->print_value_on(st);
+      if (st->count() == c) {
+        // No output.  (Can happen in product builds.)
+        st->print("(a %s)", obj->klass()->external_name());
+      }
+      return;
+    }
+  }
+
+  // Fall through to a simple (hexadecimal) numeral.
+  st->print(PTR_FORMAT, adr);
+}
+
+void decode_env::print_insn_labels() {
+  address p = cur_insn();
+  outputStream* st = output();
+  CodeBlob* cb = _code;
+  if (cb != NULL) {
+    cb->print_block_comment(st, p);
+  }
+  _comments.print_block_comment(st, (intptr_t)(p - _start));
+  if (_print_pc) {
+    st->print("  " PTR_FORMAT ": ", p);
+  }
+}
+
+void decode_env::print_insn_bytes(address pc, address pc_limit) {
+  outputStream* st = output();
+  size_t incr = 1;
+  size_t perline = _bytes_per_line;
+  if ((size_t) Disassembler::pd_instruction_alignment() >= sizeof(int)
+      && !((uintptr_t)pc % sizeof(int))
+      && !((uintptr_t)pc_limit % sizeof(int))) {
+    incr = sizeof(int);
+    if (perline % incr)  perline += incr - (perline % incr);
+  }
+  while (pc < pc_limit) {
+    // tab to the desired column:
+    st->move_to(COMMENT_COLUMN);
+    address pc0 = pc;
+    address pc1 = pc + perline;
+    if (pc1 > pc_limit)  pc1 = pc_limit;
+    for (; pc < pc1; pc += incr) {
+      if (pc == pc0)
+        st->print(BYTES_COMMENT);
+      else if ((uint)(pc - pc0) % sizeof(int) == 0)
+        st->print(" ");         // put out a space on word boundaries
+      if (incr == sizeof(int))
+            st->print("%08lx", *(int*)pc);
+      else  st->print("%02x",   (*pc)&0xFF);
+    }
+    st->cr();
+  }
+}
+
+
+static void* event_to_env(void* env_pv, const char* event, void* arg) {
+  decode_env* env = (decode_env*) env_pv;
+  return env->handle_event(event, (address) arg);
+}
+
+static int printf_to_env(void* env_pv, const char* format, ...) {
+  decode_env* env = (decode_env*) env_pv;
+  outputStream* st = env->output();
+  size_t flen = strlen(format);
+  const char* raw = NULL;
+  if (flen == 0)  return 0;
+  if (flen == 1 && format[0] == '\n') { st->bol(); return 1; }
+  if (flen < 2 ||
+      strchr(format, '%') == NULL) {
+    raw = format;
+  } else if (format[0] == '%' && format[1] == '%' &&
+             strchr(format+2, '%') == NULL) {
+    // happens a lot on machines with names like %foo
+    flen--;
+    raw = format+1;
+  }
+  if (raw != NULL) {
+    st->print_raw(raw, (int) flen);
+    return (int) flen;
+  }
+  va_list ap;
+  va_start(ap, format);
+  julong cnt0 = st->count();
+  st->vprint(format, ap);
+  julong cnt1 = st->count();
+  va_end(ap);
+  return (int)(cnt1 - cnt0);
+}
+
+address decode_env::decode_instructions(address start, address end) {
+  _start = start; _end = end;
+
+  assert(((((intptr_t)start | (intptr_t)end) % Disassembler::pd_instruction_alignment()) == 0), "misaligned insn addr");
+
+  const bool show_bytes = false; // for disassembler debugging
+
+  //_version = Disassembler::pd_cpu_version();
+
+  if (!Disassembler::can_decode()) {
+    return NULL;
+  }
+
+  // decode a series of instructions and return the end of the last instruction
+
+  if (_print_raw) {
+    // Print whatever the library wants to print, w/o fancy callbacks.
+    // This is mainly for debugging the library itself.
+    FILE* out = stdout;
+    FILE* xmlout = (_print_raw > 1 ? out : NULL);
+    return use_new_version ?
+      (address)
+      (*Disassembler::_decode_instructions_virtual)((uintptr_t)start, (uintptr_t)end,
+                                                    start, end - start,
+                                                    NULL, (void*) xmlout,
+                                                    NULL, (void*) out,
+                                                    options(), 0/*nice new line*/)
+      :
+      (address)
+      (*Disassembler::_decode_instructions)(start, end,
+                                            NULL, (void*) xmlout,
+                                            NULL, (void*) out,
+                                            options());
+  }
+
+  return use_new_version ?
+    (address)
+    (*Disassembler::_decode_instructions_virtual)((uintptr_t)start, (uintptr_t)end,
+                                                  start, end - start,
+                                                  &event_to_env,  (void*) this,
+                                                  &printf_to_env, (void*) this,
+                                                  options(), 0/*nice new line*/)
+    :
+    (address)
+    (*Disassembler::_decode_instructions)(start, end,
+                                          &event_to_env,  (void*) this,
+                                          &printf_to_env, (void*) this,
+                                          options());
+}
+
+
+void Disassembler::decode(CodeBlob* cb, outputStream* st) {
+  if (!load_library())  return;
+  decode_env env(cb, st);
+  env.output()->print_cr("----------------------------------------------------------------------");
+  env.output()->print_cr("%s at  [" PTR_FORMAT ", " PTR_FORMAT "]  %d bytes", cb->name(), cb->code_begin(), cb->code_end(), ((jlong)(cb->code_end() - cb->code_begin())) * sizeof(unsigned char*));
+  env.decode_instructions(cb->code_begin(), cb->code_end());
+}
+
+void Disassembler::decode(address start, address end, outputStream* st, CodeComments c) {
+  if (!load_library())  return;
+  decode_env env(CodeCache::find_blob_unsafe(start), st, c);
+  env.decode_instructions(start, end);
+}
+
+void Disassembler::decode(nmethod* nm, outputStream* st) {
+  if (!load_library())  return;
+  decode_env env(nm, st);
+  env.output()->print_cr("----------------------------------------------------------------------");
+
+#ifdef SHARK
+  SharkEntry* entry = (SharkEntry *) nm->code_begin();
+  unsigned char* p   = entry->code_start();
+  unsigned char* end = entry->code_limit();
+#else
+  unsigned char* p   = nm->code_begin();
+  unsigned char* end = nm->code_end();
+#endif // SHARK
+
+  nm->method()->method_holder()->name()->print_symbol_on(env.output());
+  env.output()->print(".");
+  nm->method()->name()->print_symbol_on(env.output());
+  env.output()->print_cr("  [" PTR_FORMAT ", " PTR_FORMAT "]  %d bytes", p, end, ((jlong)(end - p)) * sizeof(unsigned char*));
+
+  // If there has been profiling, print the buckets.
+  if (FlatProfiler::bucket_start_for(p) != NULL) {
+    unsigned char* p1 = p;
+    int total_bucket_count = 0;
+    while (p1 < end) {
+      unsigned char* p0 = p1;
+      p1 += pd_instruction_alignment();
+      address bucket_pc = FlatProfiler::bucket_start_for(p1);
+      if (bucket_pc != NULL && bucket_pc > p0 && bucket_pc <= p1)
+        total_bucket_count += FlatProfiler::bucket_count_for(p0);
+    }
+    env.set_total_ticks(total_bucket_count);
+  }
+
+  // Print constant table.
+  if (nm->consts_size() > 0) {
+    nm->print_nmethod_labels(env.output(), nm->consts_begin());
+    int offset = 0;
+    for (address p = nm->consts_begin(); p < nm->consts_end(); p += 4, offset += 4) {
+      if ((offset % 8) == 0) {
+        env.output()->print_cr("  " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT "   " PTR64_FORMAT, p, offset, *((int32_t*) p), *((int64_t*) p));
+      } else {
+        env.output()->print_cr("  " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT,                    p, offset, *((int32_t*) p));
+      }
+    }
+  }
+
+  env.decode_instructions(p, end);
+}
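
decode_instructions above drives the hsdis plugin through two callbacks: an event callback that brackets each instruction ("insn" ... "/insn") and reports addresses, and a printf-style callback that receives the disassembly text. A simplified, self-contained model of that callback shape; the toy decoder and its fixed 4-byte instruction size are invented for illustration:

    // Callback signatures modeled on the hsdis interface used above.
    typedef void* (*event_callback_t)(void* env, const char* event, void* arg);
    typedef int   (*printf_callback_t)(void* env, const char* fmt, ...);

    // Invented stand-in decoder: emits one fake 4-byte "insn" per iteration.
    void* toy_decode(void* start, void* end,
                     event_callback_t ev, void* ev_env,
                     printf_callback_t pf, void* pf_env) {
      char* pc = (char*) start;
      while (pc < (char*) end) {
        ev(ev_env, "insn", pc);   // begin-instruction event -> start_insn()
        pf(pf_env, "nop");        // disassembly text -> printf_to_env()
        pc += 4;
        ev(ev_env, "/insn", pc);  // end-instruction event -> end_insn()
      }
      return pc;  // address past the last decoded instruction
    }
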
--- a/src/share/vm/compiler/oopMap.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/compiler/oopMap.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -545,7 +545,7 @@
     st->print("Value" );
     break;
   case OopMapValue::narrowoop_value:
-    tty->print("NarrowOop" );
+    st->print("NarrowOop" );
     break;
   case OopMapValue::callee_saved_value:
     st->print("Callers_" );
--- a/src/share/vm/graal/graalCodeInstaller.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/graal/graalCodeInstaller.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -679,7 +679,7 @@
 void CodeInstaller::site_DataPatch(CodeBuffer& buffer, jint pc_offset, oop site) {
   oop constant = CompilationResult_DataPatch::constant(site);
   int alignment = CompilationResult_DataPatch::alignment(site);
-  bool inlined = CompilationResult_DataPatch::inlined(site);
+  bool inlined = CompilationResult_DataPatch::inlined(site) == JNI_TRUE;
   oop kind = Constant::kind(constant);
 
   address instruction = _instructions->start() + pc_offset;
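
The DataPatch fix above converts a JNI jboolean to a C++ bool by comparing against JNI_TRUE instead of relying on an implicit byte-to-bool conversion. A small sketch of that idiom; the jboolean/TRUE_VAL stand-ins below mirror jni.h's definitions rather than including it:

    #include <cstdint>

    typedef uint8_t jboolean;            // stand-in for jni.h's unsigned byte
    static const jboolean TRUE_VAL = 1;  // stand-in for JNI_TRUE

    // The explicit comparison documents intent and avoids treating stray
    // nonzero byte patterns as distinct truth values.
    bool to_bool(jboolean b) {
      return b == TRUE_VAL;
    }
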
--- a/src/share/vm/graal/graalEnv.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/graal/graalEnv.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -177,7 +177,7 @@
     {
       // We have to lock the cpool to keep the oop from being resolved
       // while we are accessing it.
-      MutexLockerEx ml(cpool->lock(), THREAD);
+      MutexLockerEx ml(cpool->lock(), Mutex::_no_safepoint_check_flag);
 
       constantTag tag = cpool->tag_at(index);
       if (tag.is_klass()) {
--- a/src/share/vm/graal/graalInterpreterToVM.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "prims/jni.h"
-#include "runtime/javaCalls.hpp"
-#include "memory/oopFactory.hpp"
-#include "graal/graalInterpreterToVM.hpp"
-#include "graal/graalCompiler.hpp"
-#include "graal/graalCompilerToVM.hpp"
-
-#ifdef HIGH_LEVEL_INTERPRETER
-
-// public Object invoke(HotSpotResolvedJavaMethod method, boolean highLevel, Object... args);
-JNIEXPORT jobject JNICALL Java_com_oracle_graal_hotspot_HotSpotRuntimeInterpreterInterface_invoke(JNIEnv *env, jobject, jobject method, jobject args) {
-  TRACE_graal_3("InterpreterToVM::invoke");
-
-  VM_ENTRY_MARK;
-  HandleMark hm;
-  
-  assert(method != NULL, "just checking");
-  assert(thread->is_Java_thread(), "must be");
-  methodHandle mh = getMethodFromHotSpotMethod(method);
-    
-  JavaCallArguments jca;
-  JavaArgumentUnboxer jap(mh->signature(), &jca, (arrayOop) JNIHandles::resolve(args), mh->is_static());
-  
-#ifndef PRODUCT
-  if (PrintHighLevelInterpreterVMTransitions) {
-    ResourceMark rm;
-    tty->print_cr("High level interpreter -> VM (%s)", mh->name_and_sig_as_C_string());
-  }
-#endif
-
-  JavaValue result(jap.get_ret_type());
-  thread->set_high_level_interpreter_in_vm(true);
-  JavaCalls::call(&result, mh, &jca, THREAD);
-  thread->set_high_level_interpreter_in_vm(false);
-
-#ifndef PRODUCT
-  if (PrintHighLevelInterpreterVMTransitions) {
-    ResourceMark rm;
-    tty->print_cr("VM (%s) -> high level interpreter", mh->name_and_sig_as_C_string());
-  }
-#endif
-
-  if (thread->has_pending_exception()) {
-    return NULL;
-  }
-
-  if (jap.get_ret_type() == T_VOID) {
-    return NULL;
-  } else if (jap.get_ret_type() == T_OBJECT || jap.get_ret_type() == T_ARRAY) {
-    return JNIHandles::make_local((oop) result.get_jobject());
-  } else {
-    oop o = java_lang_boxing_object::create(jap.get_ret_type(), (jvalue *) result.get_value_addr(), CHECK_NULL);
-    return JNIHandles::make_local(o);
-  }
-}
-
-#define CC (char*)  /*cast a literal from (const char*)*/
-#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &(Java_com_oracle_graal_hotspot_HotSpotRuntimeInterpreterInterface_##f))
-
-#define RESOLVED_METHOD "Lcom/oracle/graal/api/meta/ResolvedJavaMethod;"
-#define OBJECT          "Ljava/lang/Object;"
-
-JNINativeMethod InterpreterToVM_methods[] = {
-  {CC"invoke",                     CC"("RESOLVED_METHOD "["OBJECT")"OBJECT,     FN_PTR(invoke)}
-};
-
-int InterpreterToVM_methods_count() {
-  return sizeof(InterpreterToVM_methods) / sizeof(JNINativeMethod);
-}
-
-#endif // HIGH_LEVEL_INTERPRETER
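
The deleted graalInterpreterToVM.cpp exposed its invoke entry point through the standard JNI RegisterNatives table, as in the JNINativeMethod array at the bottom of the removed file. A self-contained sketch of that registration idiom with a hypothetical method:

    #include <jni.h>

    // Hypothetical native method; the real file bound
    // HotSpotRuntimeInterpreterInterface.invoke this way.
    jobject JNICALL Java_example_invoke(JNIEnv* env, jobject self) {
      return NULL;  // placeholder body
    }

    static JNINativeMethod methods[] = {
      // name            signature                       function pointer
      { (char*)"invoke", (char*)"()Ljava/lang/Object;", (void*)Java_example_invoke }
    };

    jint register_methods(JNIEnv* env, jclass klass) {
      return env->RegisterNatives(klass, methods,
                                  sizeof(methods) / sizeof(methods[0]));
    }
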
--- a/src/share/vm/graal/graalInterpreterToVM.hpp	Mon Nov 26 19:30:54 2012 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifdef HIGH_LEVEL_INTERPRETER
-#ifndef SHARE_VM_GRAAL_GRAAL_INTERPRETER_TO_VM_HPP
-#define SHARE_VM_GRAAL_GRAAL_INTERPRETER_TO_VM_HPP
-
-#include "prims/jni.h"
-
-extern JNINativeMethod InterpreterToVM_methods[];
-int InterpreterToVM_methods_count();
-
-// nothing here - no need to define the jni method implementations in a header file
-
-#endif // SHARE_VM_GRAAL_GRAAL_INTERPRETER_TO_VM_HPP
-#endif // HIGH_LEVEL_INTERPRETER
--- a/src/share/vm/graal/graalVMToInterpreter.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,280 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "graal/graalVMToInterpreter.hpp"
-#include "graal/graalInterpreterToVM.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "classfile/vmSymbols.hpp"
-#include "graal/graalCompiler.hpp"
-
-#ifdef HIGH_LEVEL_INTERPRETER
-
-// those are *global* handles
-jobject VMToInterpreter::_interpreterPermObject = NULL;
-jobject VMToInterpreter::_interpreterPermKlass = NULL;
-
-class JavaArgumentBoxer : public SignatureIterator {
- protected:
-  JavaCallArguments* _args;
-  Thread* _thread;
-  objArrayHandle _obj_array;
-  int _index;
-  int _position;
-
- public:
-  JavaArgumentBoxer(Symbol* signature, objArrayHandle obj_array, JavaCallArguments* args, bool is_static, TRAPS) : SignatureIterator(signature) {
-    _obj_array = obj_array;
-    _args = args;
-    _index = _position = 0;
-    _thread = THREAD;
-    if (!is_static) {
-      push(next_object(T_OBJECT));
-    }
-    iterate();
-    assert(_index == _obj_array->length(), "arg count mismatch with signature");
-  }
-
-  inline void do_bool()   {
-    if (!is_return_type()) {
-      jvalue value;
-      value.z = (jboolean)_args->get_int(_position);
-      push(java_lang_boxing_object::create(T_BOOLEAN, &value, _thread));
-    }
-  }
-  inline void do_char()   {
-    if (!is_return_type()) {
-      jvalue value;
-      value.c = (jchar)_args->get_int(_position);
-      push(java_lang_boxing_object::create(T_CHAR, &value, _thread));
-    }
-  }
-  inline void do_short()  {
-    if (!is_return_type()) {
-      jvalue value;
-      value.s = (jshort)_args->get_int(_position);
-      push(java_lang_boxing_object::create(T_SHORT, &value, _thread));
-    }
-  }
-  inline void do_byte()   {
-    if (!is_return_type()) {
-      jvalue value;
-      value.b = (jbyte)_args->get_int(_position);
-      push(java_lang_boxing_object::create(T_BYTE, &value, _thread));
-    }
-  }
-  inline void do_int()    {
-    if (!is_return_type()) {
-      jvalue value;
-      value.i = (jint)_args->get_int(_position);
-      push(java_lang_boxing_object::create(T_INT, &value, _thread));
-    }
-  }
-
-  inline void do_long()   {
-    if (!is_return_type()) {
-      jvalue value;
-      value.j = (jlong)_args->get_long(_position);
-      push(java_lang_boxing_object::create(T_LONG, &value, _thread));
-    }
-  }
-
-  inline void do_float()  {
-    if (!is_return_type()) {
-      jvalue value;
-      value.f = (jfloat)_args->get_float(_position);
-      push(java_lang_boxing_object::create(T_FLOAT, &value, _thread));
-    }
-  }
-
-  inline void do_double() {
-    if (!is_return_type()) {
-      jvalue value;
-      value.d = (jdouble)_args->get_double(_position);
-      push(java_lang_boxing_object::create(T_DOUBLE, &value, _thread));
-    }
-  }
-
-  inline void do_object(int begin, int end) { if (!is_return_type()) push(next_object(T_OBJECT)); }
-  inline void do_array(int begin, int end)  { if (!is_return_type()) push(next_object(T_ARRAY)); }
-  inline void do_void()                     { }
-  
-  inline oop next_object(BasicType type) {
-    assert(type == T_OBJECT || type == T_ARRAY, "must be");
-    return *(_args->get_raw_oop(_position));
-  }
-  
-  inline void push(oop obj) {
-    _obj_array->obj_at_put(_index, obj);
-    _index++;
-  }
-};
-
-bool VMToInterpreter::allocate_interpreter(const char* interpreter_class_name, const char* interpreter_arguments, TRAPS) {
-  assert(_interpreterPermObject == NULL && _interpreterPermKlass == NULL, "no need to allocate twice");
-
-  HandleMark hm;
-  // load the interpreter class using its fully qualified class name
-  Symbol* class_name = SymbolTable::lookup(interpreter_class_name, (int)strlen(interpreter_class_name), CHECK_false);
-  instanceKlassHandle interpreter_klass = SystemDictionary::resolve_or_null(class_name, SystemDictionary::java_system_loader(), NULL, CHECK_false);
-  if (interpreter_klass.is_null()) {
-    tty->print_cr("Could not load HighLevelInterpreterClass '%s'", interpreter_class_name);
-    return false;
-  }
-
-  // allocate an interpreter instance
-  interpreter_klass->initialize(CHECK_false);
-  instanceHandle interpreter_instance = interpreter_klass->allocate_instance_handle(CHECK_false);
-  
-  // initialize the interpreter instance
-  Handle args;
-  if (interpreter_arguments != NULL) {
-    args = java_lang_String::create_from_platform_dependent_str(interpreter_arguments, CHECK_false);
-  }
-
-  JavaValue result(T_BOOLEAN);
-  JavaCalls::call_virtual(&result, interpreter_instance, interpreter_klass, vmSymbols::initialize_name(), vmSymbols::setOption_signature(), args, CHECK_false);
-  if (result.get_jboolean() != JNI_TRUE) {
-    tty->print_cr("Could not invoke '%s::initialize(String)'", interpreter_class_name);
-    return false;
-  }
-
-  // store the instance globally and keep it alive
-  _interpreterPermObject = JNIHandles::make_global(interpreter_instance);
-  _interpreterPermKlass = JNIHandles::make_global(interpreter_klass);
-
-  // register the native functions that are needed by the interpreter
-  {
-    assert(THREAD->is_Java_thread(), "must be");
-    JavaThread* thread = (JavaThread*) THREAD;
-    ThreadToNativeFromVM trans(thread);
-    JNIEnv *env = thread->jni_environment();
-    jclass klass = env->FindClass("com/oracle/graal/hotspot/HotSpotRuntimeInterpreterInterface");
-    if (klass == NULL) {
-      tty->print_cr("Could not find class HotSpotRuntimeInterpreterInterface");
-      return false;
-    }
-    env->RegisterNatives(klass, InterpreterToVM_methods, InterpreterToVM_methods_count());
-    if (thread->has_pending_exception()) {
-      tty->print_cr("Could not register HotSpotRuntimeInterpreterInterface native methods");
-      return false;
-    }
-  }
-
-  return true;
-}
-
-Handle VMToInterpreter::interpreter_instance() {
-  return Handle(JNIHandles::resolve_non_null(_interpreterPermObject));
-}
-
-KlassHandle VMToInterpreter::interpreter_klass() {
-  return KlassHandle(JNIHandles::resolve_non_null(_interpreterPermKlass));
-}
-
-void VMToInterpreter::execute(JavaValue* result, methodHandle* m, JavaCallArguments* args, BasicType expected_result_type, TRAPS) {
-  assert(interpreter_instance().not_null(), "must be allocated before the first call");
-  assert(THREAD->is_Java_thread(), "must be");
-  assert(m != NULL, "must be");
-  assert(args != NULL, "must be");
-
-  JavaThread* thread = (JavaThread*)THREAD;
-  methodHandle method = *m;
-  int parameter_count = ArgumentCount(method->signature()).size() + (method->is_static() ? 0 : 1);
-  objArrayHandle args_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), parameter_count, CHECK);
-  JavaArgumentBoxer jab(method->signature(), args_array, args, method->is_static(), thread);
-  Handle hotspot_method = GraalCompiler::createHotSpotResolvedJavaMethod(method, CHECK);
-
-  JavaValue boxed_result(T_OBJECT);
-  JavaCallArguments boxed_args;
-  boxed_args.set_receiver(interpreter_instance());
-  boxed_args.push_oop(hotspot_method);
-  boxed_args.push_oop(args_array);
-  
-#ifndef PRODUCT
-  if (PrintHighLevelInterpreterVMTransitions) {
-    ResourceMark m;
-    tty->print_cr("VM -> high level interpreter (%s)", method->name_and_sig_as_C_string());
-  }
-#endif
-  
-  thread->set_high_level_interpreter_in_vm(false);
-  JavaCalls::call_virtual(&boxed_result, interpreter_klass(), vmSymbols::interpreter_execute_name(), vmSymbols::interpreter_execute_signature(), &boxed_args, thread);
-  thread->set_high_level_interpreter_in_vm(true);
-  
-#ifndef PRODUCT
-  if (PrintHighLevelInterpreterVMTransitions) {
-    ResourceMark m;
-    tty->print_cr("High level interpreter (%s) -> VM", method->name_and_sig_as_C_string());
-  }
-#endif
-  
-  if (HAS_PENDING_EXCEPTION) {
-    return;
-  }
-
-  // unbox the result if necessary
-  if (is_java_primitive(expected_result_type)) {
-    unbox_primitive(&boxed_result, result);
-  } else if (expected_result_type == T_OBJECT || expected_result_type == T_ARRAY) {
-    result->set_jobject(boxed_result.get_jobject());
-  }
-}
-
-void VMToInterpreter::unbox_primitive(JavaValue* boxed, JavaValue* result) {
-  oop box = JNIHandles::resolve(boxed->get_jobject());
-
-  jvalue value;
-  BasicType type = java_lang_boxing_object::get_value(box, &value);
-    switch (type) {
-    case T_BOOLEAN:
-      result->set_jint(value.z);
-      break;
-    case T_CHAR:
-      result->set_jint(value.c);
-      break;
-    case T_FLOAT:
-      result->set_jfloat(value.f);
-      break;
-    case T_DOUBLE:
-      result->set_jdouble(value.d);
-      break;
-    case T_BYTE:
-      result->set_jint(value.b);
-      break;
-    case T_SHORT:
-      result->set_jint(value.s);
-      break;
-    case T_INT:
-      result->set_jint(value.i);
-      break;
-    case T_LONG:
-      result->set_jlong(value.j);
-      break;
-    default:
-      ShouldNotReachHere();
-      break;
-  }
-}
-
-#endif // HIGH_LEVEL_INTERPRETER
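
The deleted JavaArgumentBoxer walked a method signature and boxed each primitive argument into its wrapper object before handing the array to the Java-level interpreter. A minimal standalone sketch of that boxing step for one type, using the standard JNI valueOf recipe (the helper name box_int is invented):

    #include <jni.h>

    jobject box_int(JNIEnv* env, jint v) {
      jclass integer = env->FindClass("java/lang/Integer");
      if (integer == NULL) return NULL;  // pending ClassNotFoundException
      jmethodID valueOf = env->GetStaticMethodID(
          integer, "valueOf", "(I)Ljava/lang/Integer;");
      if (valueOf == NULL) return NULL;
      return env->CallStaticObjectMethod(integer, valueOf, v);
    }
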
--- a/src/share/vm/graal/graalVMToInterpreter.hpp	Mon Nov 26 19:30:54 2012 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "memory/allocation.hpp"
-#include "oops/oop.hpp"
-#include "runtime/handles.hpp"
-#include "runtime/thread.hpp"
-#include "classfile/javaClasses.hpp"
-#include "runtime/jniHandles.hpp"
-#include "runtime/javaCalls.hpp"
-
-#ifdef HIGH_LEVEL_INTERPRETER
-#ifndef SHARE_VM_GRAAL_GRAAL_VM_TO_INTERPRETER_HPP
-#define SHARE_VM_GRAAL_GRAAL_VM_TO_INTERPRETER_HPP
-
-class VMToInterpreter : public AllStatic {
-
-private:
-  static jobject _interpreterPermObject;
-  static jobject _interpreterPermKlass;
-
-  static Handle interpreter_instance();
-  static KlassHandle interpreter_klass();
-  static void unbox_primitive(JavaValue* boxed, JavaValue* result);
-
-public:
-  static bool allocate_interpreter(const char* interpreter_class_name, const char* interpreter_arguments, TRAPS);
- 
-  // invokes the interpreter method execute(ResolvedJavaMethod method, Object... arguments)
-  static void execute(JavaValue* result, methodHandle* m, JavaCallArguments* args, BasicType expected_result_type, TRAPS);
-};
-
-#endif // SHARE_VM_GRAAL_GRAAL_VM_TO_INTERPRETER_HPP
-#endif // HIGH_LEVEL_INTERPRETER
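
The deleted header declared the two cached jobject handles that interpreter_instance() and interpreter_klass() resolved on every call. Keeping a Java object alive in a static native field requires a global handle; a plain local reference may be collected or moved. A hedged sketch of the analogous pattern at the public JNI level (the VM-internal code above used JNIHandles rather than a JNIEnv):

    #include <jni.h>

    // Sketch only: promote a local ref to a global ref before caching it
    // in a static, and release it explicitly when done.
    static jobject cached_interpreter = NULL;

    void cache_interpreter(JNIEnv* env, jobject local) {
      cached_interpreter = env->NewGlobalRef(local);  // survives across calls
    }

    void drop_interpreter(JNIEnv* env) {
      if (cached_interpreter != NULL) {
        env->DeleteGlobalRef(cached_interpreter);
        cached_interpreter = NULL;
      }
    }
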
--- a/src/share/vm/interpreter/interpreter.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/interpreter/interpreter.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -61,9 +61,8 @@
 
 void InterpreterCodelet::print_on(outputStream* st) const {
   ttyLocker ttyl;
+  if (PrintInterpreter || PrintMachineCodeToFile) {
 
-  if (PrintInterpreter) {
-    st->cr();
     st->print_cr("----------------------------------------------------------------------");
   }
 
@@ -72,8 +71,7 @@
   st->print_cr("[" INTPTR_FORMAT ", " INTPTR_FORMAT "]  %d bytes",
                 code_begin(), code_end(), code_size());
 
-  if (PrintInterpreter) {
-    st->cr();
+  if (PrintInterpreter || PrintMachineCodeToFile) {
     Disassembler::decode(code_begin(), code_end(), st, DEBUG_ONLY(_comments) NOT_DEBUG(CodeComments()));
   }
 }
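
This hunk makes InterpreterCodelet::print_on() emit the separator line and the disassembly when either PrintInterpreter or PrintMachineCodeToFile is set (the latter flag is introduced elsewhere in this merge), drops the st->cr() blank lines the old code printed, and keeps the codelet description and address range unconditional. A standalone sketch of the resulting control flow, with stand-in flags:

    #include <cstdio>

    // Stand-ins for the HotSpot flags; not the real declarations.
    static bool PrintInterpreter       = false;
    static bool PrintMachineCodeToFile = true;

    static void print_codelet(FILE* st, const char* desc) {
      if (PrintInterpreter || PrintMachineCodeToFile) {
        fprintf(st, "----------------------------------------------------------------------\n");
      }
      // the description and address range are always printed
      fprintf(st, "%s\n", desc);
      if (PrintInterpreter || PrintMachineCodeToFile) {
        fprintf(st, "(disassembly of the codelet would follow here)\n");
      }
    }

    int main() {
      print_codelet(stdout, "return entry points  [0x1000, 0x1040]  64 bytes");
      return 0;
    }
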
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -1,621 +1,625 @@
-/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/templateTable.hpp"
-
-#ifndef CC_INTERP
-
-# define __ _masm->
-
-void TemplateInterpreter::initialize() {
-  if (_code != NULL) return;
-  // assertions
-  assert((int)Bytecodes::number_of_codes <= (int)DispatchTable::length,
-         "dispatch table too small");
-
-  AbstractInterpreter::initialize();
-
-  TemplateTable::initialize();
-
-  // generate interpreter
-  { ResourceMark rm;
-    TraceTime timer("Interpreter generation", TraceStartupTime);
-    int code_size = InterpreterCodeSize;
-    NOT_PRODUCT(code_size *= 4;)  // debug uses extra interpreter code space
-    _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
-                          "Interpreter");
-    InterpreterGenerator g(_code);
-    if (PrintInterpreter) print();
-  }
-
-  // initialize dispatch table
-  _active_table = _normal_table;
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-// Implementation of EntryPoint
-
-EntryPoint::EntryPoint() {
-  assert(number_of_states == 9, "check the code below");
-  _entry[btos] = NULL;
-  _entry[ctos] = NULL;
-  _entry[stos] = NULL;
-  _entry[atos] = NULL;
-  _entry[itos] = NULL;
-  _entry[ltos] = NULL;
-  _entry[ftos] = NULL;
-  _entry[dtos] = NULL;
-  _entry[vtos] = NULL;
-}
-
-
-EntryPoint::EntryPoint(address bentry, address centry, address sentry, address aentry, address ientry, address lentry, address fentry, address dentry, address ventry) {
-  assert(number_of_states == 9, "check the code below");
-  _entry[btos] = bentry;
-  _entry[ctos] = centry;
-  _entry[stos] = sentry;
-  _entry[atos] = aentry;
-  _entry[itos] = ientry;
-  _entry[ltos] = lentry;
-  _entry[ftos] = fentry;
-  _entry[dtos] = dentry;
-  _entry[vtos] = ventry;
-}
-
-
-void EntryPoint::set_entry(TosState state, address entry) {
-  assert(0 <= state && state < number_of_states, "state out of bounds");
-  _entry[state] = entry;
-}
-
-
-address EntryPoint::entry(TosState state) const {
-  assert(0 <= state && state < number_of_states, "state out of bounds");
-  return _entry[state];
-}
-
-
-void EntryPoint::print() {
-  tty->print("[");
-  for (int i = 0; i < number_of_states; i++) {
-    if (i > 0) tty->print(", ");
-    tty->print(INTPTR_FORMAT, _entry[i]);
-  }
-  tty->print("]");
-}
-
-
-bool EntryPoint::operator == (const EntryPoint& y) {
-  int i = number_of_states;
-  while (i-- > 0) {
-    if (_entry[i] != y._entry[i]) return false;
-  }
-  return true;
-}
-
-
-//------------------------------------------------------------------------------------------------------------------------
-// Implementation of DispatchTable
-
-EntryPoint DispatchTable::entry(int i) const {
-  assert(0 <= i && i < length, "index out of bounds");
-  return
-    EntryPoint(
-      _table[btos][i],
-      _table[ctos][i],
-      _table[stos][i],
-      _table[atos][i],
-      _table[itos][i],
-      _table[ltos][i],
-      _table[ftos][i],
-      _table[dtos][i],
-      _table[vtos][i]
-    );
-}
-
-
-void DispatchTable::set_entry(int i, EntryPoint& entry) {
-  assert(0 <= i && i < length, "index out of bounds");
-  assert(number_of_states == 9, "check the code below");
-  _table[btos][i] = entry.entry(btos);
-  _table[ctos][i] = entry.entry(ctos);
-  _table[stos][i] = entry.entry(stos);
-  _table[atos][i] = entry.entry(atos);
-  _table[itos][i] = entry.entry(itos);
-  _table[ltos][i] = entry.entry(ltos);
-  _table[ftos][i] = entry.entry(ftos);
-  _table[dtos][i] = entry.entry(dtos);
-  _table[vtos][i] = entry.entry(vtos);
-}
-
-
-bool DispatchTable::operator == (DispatchTable& y) {
-  int i = length;
-  while (i-- > 0) {
-    EntryPoint t = y.entry(i); // for compiler compatibility (BugId 4150096)
-    if (!(entry(i) == t)) return false;
-  }
-  return true;
-}
-
-address    TemplateInterpreter::_remove_activation_entry                    = NULL;
-address    TemplateInterpreter::_remove_activation_preserving_args_entry    = NULL;
-
-
-address    TemplateInterpreter::_throw_ArrayIndexOutOfBoundsException_entry = NULL;
-address    TemplateInterpreter::_throw_ArrayStoreException_entry            = NULL;
-address    TemplateInterpreter::_throw_ArithmeticException_entry            = NULL;
-address    TemplateInterpreter::_throw_ClassCastException_entry             = NULL;
-address    TemplateInterpreter::_throw_NullPointerException_entry           = NULL;
-address    TemplateInterpreter::_throw_StackOverflowError_entry             = NULL;
-address    TemplateInterpreter::_throw_exception_entry                      = NULL;
-
-#ifndef PRODUCT
-EntryPoint TemplateInterpreter::_trace_code;
-#endif // !PRODUCT
-EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries];
-EntryPoint TemplateInterpreter::_earlyret_entry;
-EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ];
-EntryPoint TemplateInterpreter::_continuation_entry;
-EntryPoint TemplateInterpreter::_safept_entry;
-
-address    TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
-address    TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
-
-DispatchTable TemplateInterpreter::_active_table;
-DispatchTable TemplateInterpreter::_normal_table;
-DispatchTable TemplateInterpreter::_safept_table;
-address    TemplateInterpreter::_wentry_point[DispatchTable::length];
-
-TemplateInterpreterGenerator::TemplateInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
-  _unimplemented_bytecode    = NULL;
-  _illegal_bytecode_sequence = NULL;
-}
-
-static const BasicType types[Interpreter::number_of_result_handlers] = {
-  T_BOOLEAN,
-  T_CHAR   ,
-  T_BYTE   ,
-  T_SHORT  ,
-  T_INT    ,
-  T_LONG   ,
-  T_VOID   ,
-  T_FLOAT  ,
-  T_DOUBLE ,
-  T_OBJECT
-};
-
-void TemplateInterpreterGenerator::generate_all() {
-  AbstractInterpreterGenerator::generate_all();
-
-  { CodeletMark cm(_masm, "error exits");
-    _unimplemented_bytecode    = generate_error_exit("unimplemented bytecode");
-    _illegal_bytecode_sequence = generate_error_exit("illegal bytecode sequence - method not verified");
-  }
-
-#ifndef PRODUCT
-  if (TraceBytecodes) {
-    CodeletMark cm(_masm, "bytecode tracing support");
-    Interpreter::_trace_code =
-      EntryPoint(
-        generate_trace_code(btos),
-        generate_trace_code(ctos),
-        generate_trace_code(stos),
-        generate_trace_code(atos),
-        generate_trace_code(itos),
-        generate_trace_code(ltos),
-        generate_trace_code(ftos),
-        generate_trace_code(dtos),
-        generate_trace_code(vtos)
-      );
-  }
-#endif // !PRODUCT
-
-  { CodeletMark cm(_masm, "return entry points");
-    for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
-      Interpreter::_return_entry[i] =
-        EntryPoint(
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(atos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(ltos, i),
-          generate_return_entry_for(ftos, i),
-          generate_return_entry_for(dtos, i),
-          generate_return_entry_for(vtos, i)
-        );
-    }
-  }
-
-  { CodeletMark cm(_masm, "earlyret entry points");
-    Interpreter::_earlyret_entry =
-      EntryPoint(
-        generate_earlyret_entry_for(btos),
-        generate_earlyret_entry_for(ctos),
-        generate_earlyret_entry_for(stos),
-        generate_earlyret_entry_for(atos),
-        generate_earlyret_entry_for(itos),
-        generate_earlyret_entry_for(ltos),
-        generate_earlyret_entry_for(ftos),
-        generate_earlyret_entry_for(dtos),
-        generate_earlyret_entry_for(vtos)
-      );
-  }
-
-  { CodeletMark cm(_masm, "deoptimization entry points");
-    for (int i = 0; i < Interpreter::number_of_deopt_entries; i++) {
-      Interpreter::_deopt_entry[i] =
-        EntryPoint(
-          generate_deopt_entry_for(itos, i),
-          generate_deopt_entry_for(itos, i),
-          generate_deopt_entry_for(itos, i),
-          generate_deopt_entry_for(atos, i),
-          generate_deopt_entry_for(itos, i),
-          generate_deopt_entry_for(ltos, i),
-          generate_deopt_entry_for(ftos, i),
-          generate_deopt_entry_for(dtos, i),
-          generate_deopt_entry_for(vtos, i)
-        );
-    }
-  }
-
-  { CodeletMark cm(_masm, "result handlers for native calls");
-    // The various result converter stublets.
-    int is_generated[Interpreter::number_of_result_handlers];
-    memset(is_generated, 0, sizeof(is_generated));
-
-    for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
-      BasicType type = types[i];
-      if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
-        Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
-      }
-    }
-  }
-
-  for (int j = 0; j < number_of_states; j++) {
-    const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos};
-    int index = Interpreter::TosState_as_index(states[j]);
-    Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
-    Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
-  }
-
-  { CodeletMark cm(_masm, "continuation entry points");
-    Interpreter::_continuation_entry =
-      EntryPoint(
-        generate_continuation_for(btos),
-        generate_continuation_for(ctos),
-        generate_continuation_for(stos),
-        generate_continuation_for(atos),
-        generate_continuation_for(itos),
-        generate_continuation_for(ltos),
-        generate_continuation_for(ftos),
-        generate_continuation_for(dtos),
-        generate_continuation_for(vtos)
-      );
-  }
-
-  { CodeletMark cm(_masm, "safepoint entry points");
-    Interpreter::_safept_entry =
-      EntryPoint(
-        generate_safept_entry_for(btos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-        generate_safept_entry_for(ctos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-        generate_safept_entry_for(stos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-        generate_safept_entry_for(atos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-        generate_safept_entry_for(itos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-        generate_safept_entry_for(ltos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-        generate_safept_entry_for(ftos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-        generate_safept_entry_for(dtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
-        generate_safept_entry_for(vtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint))
-      );
-  }
-
-  { CodeletMark cm(_masm, "exception handling");
-    // (Note: this is not safepoint safe because thread may return to compiled code)
-    generate_throw_exception();
-  }
-
-  { CodeletMark cm(_masm, "throw exception entrypoints");
-    Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException");
-    Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
-    Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
-    Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
-    Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
-    Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
-  }
-
-
-
-#define method_entry(kind)                                                                    \
-  { CodeletMark cm(_masm, "method entry point (kind = " #kind ")");                    \
-    Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind);  \
-  }
-
-  // all non-native method kinds
-  method_entry(zerolocals)
-  method_entry(zerolocals_synchronized)
-  method_entry(empty)
-  method_entry(accessor)
-  method_entry(abstract)
-  method_entry(java_lang_math_sin  )
-  method_entry(java_lang_math_cos  )
-  method_entry(java_lang_math_tan  )
-  method_entry(java_lang_math_abs  )
-  method_entry(java_lang_math_sqrt )
-  method_entry(java_lang_math_log  )
-  method_entry(java_lang_math_log10)
-  method_entry(java_lang_math_exp  )
-  method_entry(java_lang_math_pow  )
-  method_entry(java_lang_ref_reference_get)
-
-  initialize_method_handle_entries();
-
-  // all native method kinds (must be one contiguous block)
-  Interpreter::_native_entry_begin = Interpreter::code()->code_end();
-  method_entry(native)
-  method_entry(native_synchronized)
-  Interpreter::_native_entry_end = Interpreter::code()->code_end();
-
-#undef method_entry
-
-  // Bytecodes
-  set_entry_points_for_all_bytes();
-  set_safepoints_for_all_bytes();
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-
-address TemplateInterpreterGenerator::generate_error_exit(const char* msg) {
-  address entry = __ pc();
-  __ stop(msg);
-  return entry;
-}
-
-
-//------------------------------------------------------------------------------------------------------------------------
-
-void TemplateInterpreterGenerator::set_entry_points_for_all_bytes() {
-  for (int i = 0; i < DispatchTable::length; i++) {
-    Bytecodes::Code code = (Bytecodes::Code)i;
-    if (Bytecodes::is_defined(code)) {
-      set_entry_points(code);
-    } else {
-      set_unimplemented(i);
-    }
-  }
-}
-
-
-void TemplateInterpreterGenerator::set_safepoints_for_all_bytes() {
-  for (int i = 0; i < DispatchTable::length; i++) {
-    Bytecodes::Code code = (Bytecodes::Code)i;
-    if (Bytecodes::is_defined(code)) Interpreter::_safept_table.set_entry(code, Interpreter::_safept_entry);
-  }
-}
-
-
-void TemplateInterpreterGenerator::set_unimplemented(int i) {
-  address e = _unimplemented_bytecode;
-  EntryPoint entry(e, e, e, e, e, e, e, e, e);
-  Interpreter::_normal_table.set_entry(i, entry);
-  Interpreter::_wentry_point[i] = _unimplemented_bytecode;
-}
-
-
-void TemplateInterpreterGenerator::set_entry_points(Bytecodes::Code code) {
-  CodeletMark cm(_masm, Bytecodes::name(code), code);
-  // initialize entry points
-  assert(_unimplemented_bytecode    != NULL, "should have been generated before");
-  assert(_illegal_bytecode_sequence != NULL, "should have been generated before");
-  address bep = _illegal_bytecode_sequence;
-  address cep = _illegal_bytecode_sequence;
-  address sep = _illegal_bytecode_sequence;
-  address aep = _illegal_bytecode_sequence;
-  address iep = _illegal_bytecode_sequence;
-  address lep = _illegal_bytecode_sequence;
-  address fep = _illegal_bytecode_sequence;
-  address dep = _illegal_bytecode_sequence;
-  address vep = _unimplemented_bytecode;
-  address wep = _unimplemented_bytecode;
-  // code for short & wide version of bytecode
-  if (Bytecodes::is_defined(code)) {
-    Template* t = TemplateTable::template_for(code);
-    assert(t->is_valid(), "just checking");
-    set_short_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);
-  }
-  if (Bytecodes::wide_is_defined(code)) {
-    Template* t = TemplateTable::template_for_wide(code);
-    assert(t->is_valid(), "just checking");
-    set_wide_entry_point(t, wep);
-  }
-  // set entry points
-  EntryPoint entry(bep, cep, sep, aep, iep, lep, fep, dep, vep);
-  Interpreter::_normal_table.set_entry(code, entry);
-  Interpreter::_wentry_point[code] = wep;
-}
-
-
-void TemplateInterpreterGenerator::set_wide_entry_point(Template* t, address& wep) {
-  assert(t->is_valid(), "template must exist");
-  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions");
-  wep = __ pc(); generate_and_dispatch(t);
-}
-
-
-void TemplateInterpreterGenerator::set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
-  assert(t->is_valid(), "template must exist");
-  switch (t->tos_in()) {
-    case btos:
-    case ctos:
-    case stos:
-      ShouldNotReachHere();  // btos/ctos/stos should use itos.
-      break;
-    case atos: vep = __ pc(); __ pop(atos); aep = __ pc(); generate_and_dispatch(t); break;
-    case itos: vep = __ pc(); __ pop(itos); iep = __ pc(); generate_and_dispatch(t); break;
-    case ltos: vep = __ pc(); __ pop(ltos); lep = __ pc(); generate_and_dispatch(t); break;
-    case ftos: vep = __ pc(); __ pop(ftos); fep = __ pc(); generate_and_dispatch(t); break;
-    case dtos: vep = __ pc(); __ pop(dtos); dep = __ pc(); generate_and_dispatch(t); break;
-    case vtos: set_vtos_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);     break;
-    default  : ShouldNotReachHere();                                                 break;
-  }
-}
-
-
-//------------------------------------------------------------------------------------------------------------------------
-
-void TemplateInterpreterGenerator::generate_and_dispatch(Template* t, TosState tos_out) {
-  if (PrintBytecodeHistogram)                                    histogram_bytecode(t);
-#ifndef PRODUCT
-  // debugging code
-  if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) count_bytecode();
-  if (PrintBytecodePairHistogram)                                histogram_bytecode_pair(t);
-  if (TraceBytecodes)                                            trace_bytecode(t);
-  if (StopInterpreterAt > 0)                                     stop_interpreter_at();
-  __ verify_FPU(1, t->tos_in());
-#endif // !PRODUCT
-  int step;
-  if (!t->does_dispatch()) {
-    step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
-    if (tos_out == ilgl) tos_out = t->tos_out();
-    // compute bytecode size
-    assert(step > 0, "just checkin'");
-    // setup stuff for dispatching next bytecode
-    if (ProfileInterpreter && VerifyDataPointer
-        && MethodData::bytecode_has_profile(t->bytecode())) {
-      __ verify_method_data_pointer();
-    }
-    __ dispatch_prolog(tos_out, step);
-  }
-  // generate template
-  t->generate(_masm);
-  // advance
-  if (t->does_dispatch()) {
-#ifdef ASSERT
-    // make sure execution doesn't go beyond this point if code is broken
-    __ should_not_reach_here();
-#endif // ASSERT
-  } else {
-    // dispatch to next bytecode
-    __ dispatch_epilog(tos_out, step);
-  }
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-// Entry points
-
-address TemplateInterpreter::return_entry(TosState state, int length) {
-  guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length");
-  return _return_entry[length].entry(state);
-}
-
-
-address TemplateInterpreter::deopt_entry(TosState state, int length) {
-  guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length");
-  return _deopt_entry[length].entry(state);
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-// Support for invokes
-
-int TemplateInterpreter::TosState_as_index(TosState state) {
-  assert( state < number_of_states , "Invalid state in TosState_as_index");
-  assert(0 <= (int)state && (int)state < TemplateInterpreter::number_of_return_addrs, "index out of bounds");
-  return (int)state;
-}
-
-
-//------------------------------------------------------------------------------------------------------------------------
-// Safepoint support
-
-static inline void copy_table(address* from, address* to, int size) {
-  // Copy non-overlapping tables. The copy has to occur word wise for MT safety.
-  while (size-- > 0) *to++ = *from++;
-}
-
-void TemplateInterpreter::notice_safepoints() {
-  if (!_notice_safepoints) {
-    // switch to safepoint dispatch table
-    _notice_safepoints = true;
-    copy_table((address*)&_safept_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
-  }
-}
-
-// switch from the dispatch table which notices safepoints back to the
-// normal dispatch table.  So that we can notice single stepping points,
-// keep the safepoint dispatch table if we are single stepping in JVMTI.
-// Note that the should_post_single_step test is exactly as fast as the
-// JvmtiExport::_enabled test and covers both cases.
-void TemplateInterpreter::ignore_safepoints() {
-  if (_notice_safepoints) {
-    if (!JvmtiExport::should_post_single_step()) {
-      // switch to normal dispatch table
-      _notice_safepoints = false;
-      copy_table((address*)&_normal_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
-    }
-  }
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-// Deoptimization support
-
-// If deoptimization happens, this function returns the point of next bytecode to continue execution
-address TemplateInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) {
-  return AbstractInterpreter::deopt_continue_after_entry(method, bcp, callee_parameters, is_top_frame);
-}
-
-// If deoptimization happens, this function returns the point where the interpreter reexecutes
-// the bytecode.
-// Note: Bytecodes::_athrow (C1 only) and Bytecodes::_return are the special cases
-//       that do not return "Interpreter::deopt_entry(vtos, 0)"
-address TemplateInterpreter::deopt_reexecute_entry(Method* method, address bcp) {
-  assert(method->contains(bcp), "just checkin'");
-  Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
-  if (code == Bytecodes::_return) {
-    // This is used for deopt during registration of finalizers
-    // during Object.<init>.  We simply need to resume execution at
-    // the standard return vtos bytecode to pop the frame normally.
-    // reexecuting the real bytecode would cause double registration
-    // of the finalizable object.
-    return _normal_table.entry(Bytecodes::_return).entry(vtos);
-  } else {
-    return AbstractInterpreter::deopt_reexecute_entry(method, bcp);
-  }
-}
-
-// If deoptimization happens, the interpreter should reexecute this bytecode.
-// This function mainly helps the compilers to set up the reexecute bit.
-bool TemplateInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
-  if (code == Bytecodes::_return) {
-    // Yes, we consider Bytecodes::_return as a special case of reexecution
-    return true;
-  } else {
-    return AbstractInterpreter::bytecode_should_reexecute(code);
-  }
-}
-
-#endif // !CC_INTERP
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/templateTable.hpp"
+#include "utilities/machineCodePrinter.hpp"
+
+#ifndef CC_INTERP
+
+# define __ _masm->
+
+void TemplateInterpreter::initialize() {
+  if (_code != NULL) return;
+  // assertions
+  assert((int)Bytecodes::number_of_codes <= (int)DispatchTable::length,
+         "dispatch table too small");
+
+  AbstractInterpreter::initialize();
+
+  TemplateTable::initialize();
+
+  // generate interpreter
+  { ResourceMark rm;
+    TraceTime timer("Interpreter generation", TraceStartupTime);
+    int code_size = InterpreterCodeSize;
+    NOT_PRODUCT(code_size *= 4;)  // debug uses extra interpreter code space
+    _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL,
+                          "Interpreter");
+    InterpreterGenerator g(_code);
+    if (PrintInterpreter) print();
+    if (PrintMachineCodeToFile) {
+      MachineCodePrinter::print(_code);
+    }
+  }
+
+  // initialize dispatch table
+  _active_table = _normal_table;
+}
+
+//------------------------------------------------------------------------------------------------------------------------
+// Implementation of EntryPoint
+
+EntryPoint::EntryPoint() {
+  assert(number_of_states == 9, "check the code below");
+  _entry[btos] = NULL;
+  _entry[ctos] = NULL;
+  _entry[stos] = NULL;
+  _entry[atos] = NULL;
+  _entry[itos] = NULL;
+  _entry[ltos] = NULL;
+  _entry[ftos] = NULL;
+  _entry[dtos] = NULL;
+  _entry[vtos] = NULL;
+}
+
+
+EntryPoint::EntryPoint(address bentry, address centry, address sentry, address aentry, address ientry, address lentry, address fentry, address dentry, address ventry) {
+  assert(number_of_states == 9, "check the code below");
+  _entry[btos] = bentry;
+  _entry[ctos] = centry;
+  _entry[stos] = sentry;
+  _entry[atos] = aentry;
+  _entry[itos] = ientry;
+  _entry[ltos] = lentry;
+  _entry[ftos] = fentry;
+  _entry[dtos] = dentry;
+  _entry[vtos] = ventry;
+}
+
+
+void EntryPoint::set_entry(TosState state, address entry) {
+  assert(0 <= state && state < number_of_states, "state out of bounds");
+  _entry[state] = entry;
+}
+
+
+address EntryPoint::entry(TosState state) const {
+  assert(0 <= state && state < number_of_states, "state out of bounds");
+  return _entry[state];
+}
+
+
+void EntryPoint::print() {
+  tty->print("[");
+  for (int i = 0; i < number_of_states; i++) {
+    if (i > 0) tty->print(", ");
+    tty->print(INTPTR_FORMAT, _entry[i]);
+  }
+  tty->print("]");
+}
+
+
+bool EntryPoint::operator == (const EntryPoint& y) {
+  int i = number_of_states;
+  while (i-- > 0) {
+    if (_entry[i] != y._entry[i]) return false;
+  }
+  return true;
+}
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Implementation of DispatchTable
+
+EntryPoint DispatchTable::entry(int i) const {
+  assert(0 <= i && i < length, "index out of bounds");
+  return
+    EntryPoint(
+      _table[btos][i],
+      _table[ctos][i],
+      _table[stos][i],
+      _table[atos][i],
+      _table[itos][i],
+      _table[ltos][i],
+      _table[ftos][i],
+      _table[dtos][i],
+      _table[vtos][i]
+    );
+}
+
+
+void DispatchTable::set_entry(int i, EntryPoint& entry) {
+  assert(0 <= i && i < length, "index out of bounds");
+  assert(number_of_states == 9, "check the code below");
+  _table[btos][i] = entry.entry(btos);
+  _table[ctos][i] = entry.entry(ctos);
+  _table[stos][i] = entry.entry(stos);
+  _table[atos][i] = entry.entry(atos);
+  _table[itos][i] = entry.entry(itos);
+  _table[ltos][i] = entry.entry(ltos);
+  _table[ftos][i] = entry.entry(ftos);
+  _table[dtos][i] = entry.entry(dtos);
+  _table[vtos][i] = entry.entry(vtos);
+}
+
+
+bool DispatchTable::operator == (DispatchTable& y) {
+  int i = length;
+  while (i-- > 0) {
+    EntryPoint t = y.entry(i); // for compiler compatibility (BugId 4150096)
+    if (!(entry(i) == t)) return false;
+  }
+  return true;
+}
+
+address    TemplateInterpreter::_remove_activation_entry                    = NULL;
+address    TemplateInterpreter::_remove_activation_preserving_args_entry    = NULL;
+
+
+address    TemplateInterpreter::_throw_ArrayIndexOutOfBoundsException_entry = NULL;
+address    TemplateInterpreter::_throw_ArrayStoreException_entry            = NULL;
+address    TemplateInterpreter::_throw_ArithmeticException_entry            = NULL;
+address    TemplateInterpreter::_throw_ClassCastException_entry             = NULL;
+address    TemplateInterpreter::_throw_NullPointerException_entry           = NULL;
+address    TemplateInterpreter::_throw_StackOverflowError_entry             = NULL;
+address    TemplateInterpreter::_throw_exception_entry                      = NULL;
+
+#ifndef PRODUCT
+EntryPoint TemplateInterpreter::_trace_code;
+#endif // !PRODUCT
+EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries];
+EntryPoint TemplateInterpreter::_earlyret_entry;
+EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ];
+EntryPoint TemplateInterpreter::_continuation_entry;
+EntryPoint TemplateInterpreter::_safept_entry;
+
+address    TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
+address    TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
+
+DispatchTable TemplateInterpreter::_active_table;
+DispatchTable TemplateInterpreter::_normal_table;
+DispatchTable TemplateInterpreter::_safept_table;
+address    TemplateInterpreter::_wentry_point[DispatchTable::length];
+
+TemplateInterpreterGenerator::TemplateInterpreterGenerator(StubQueue* _code): AbstractInterpreterGenerator(_code) {
+  _unimplemented_bytecode    = NULL;
+  _illegal_bytecode_sequence = NULL;
+}
+
+static const BasicType types[Interpreter::number_of_result_handlers] = {
+  T_BOOLEAN,
+  T_CHAR   ,
+  T_BYTE   ,
+  T_SHORT  ,
+  T_INT    ,
+  T_LONG   ,
+  T_VOID   ,
+  T_FLOAT  ,
+  T_DOUBLE ,
+  T_OBJECT
+};
+
+void TemplateInterpreterGenerator::generate_all() {
+  AbstractInterpreterGenerator::generate_all();
+
+  { CodeletMark cm(_masm, "error exits");
+    _unimplemented_bytecode    = generate_error_exit("unimplemented bytecode");
+    _illegal_bytecode_sequence = generate_error_exit("illegal bytecode sequence - method not verified");
+  }
+
+#ifndef PRODUCT
+  if (TraceBytecodes) {
+    CodeletMark cm(_masm, "bytecode tracing support");
+    Interpreter::_trace_code =
+      EntryPoint(
+        generate_trace_code(btos),
+        generate_trace_code(ctos),
+        generate_trace_code(stos),
+        generate_trace_code(atos),
+        generate_trace_code(itos),
+        generate_trace_code(ltos),
+        generate_trace_code(ftos),
+        generate_trace_code(dtos),
+        generate_trace_code(vtos)
+      );
+  }
+#endif // !PRODUCT
+
+  { CodeletMark cm(_masm, "return entry points");
+    for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
+      Interpreter::_return_entry[i] =
+        EntryPoint(
+          generate_return_entry_for(itos, i),
+          generate_return_entry_for(itos, i),
+          generate_return_entry_for(itos, i),
+          generate_return_entry_for(atos, i),
+          generate_return_entry_for(itos, i),
+          generate_return_entry_for(ltos, i),
+          generate_return_entry_for(ftos, i),
+          generate_return_entry_for(dtos, i),
+          generate_return_entry_for(vtos, i)
+        );
+    }
+  }
+
+  { CodeletMark cm(_masm, "earlyret entry points");
+    Interpreter::_earlyret_entry =
+      EntryPoint(
+        generate_earlyret_entry_for(btos),
+        generate_earlyret_entry_for(ctos),
+        generate_earlyret_entry_for(stos),
+        generate_earlyret_entry_for(atos),
+        generate_earlyret_entry_for(itos),
+        generate_earlyret_entry_for(ltos),
+        generate_earlyret_entry_for(ftos),
+        generate_earlyret_entry_for(dtos),
+        generate_earlyret_entry_for(vtos)
+      );
+  }
+
+  { CodeletMark cm(_masm, "deoptimization entry points");
+    for (int i = 0; i < Interpreter::number_of_deopt_entries; i++) {
+      Interpreter::_deopt_entry[i] =
+        EntryPoint(
+          generate_deopt_entry_for(itos, i),
+          generate_deopt_entry_for(itos, i),
+          generate_deopt_entry_for(itos, i),
+          generate_deopt_entry_for(atos, i),
+          generate_deopt_entry_for(itos, i),
+          generate_deopt_entry_for(ltos, i),
+          generate_deopt_entry_for(ftos, i),
+          generate_deopt_entry_for(dtos, i),
+          generate_deopt_entry_for(vtos, i)
+        );
+    }
+  }
+
+  { CodeletMark cm(_masm, "result handlers for native calls");
+    // The various result converter stublets.
+    int is_generated[Interpreter::number_of_result_handlers];
+    memset(is_generated, 0, sizeof(is_generated));
+
+    for (int i = 0; i < Interpreter::number_of_result_handlers; i++) {
+      BasicType type = types[i];
+      if (!is_generated[Interpreter::BasicType_as_index(type)]++) {
+        Interpreter::_native_abi_to_tosca[Interpreter::BasicType_as_index(type)] = generate_result_handler_for(type);
+      }
+    }
+  }
+
+  for (int j = 0; j < number_of_states; j++) {
+    const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos};
+    int index = Interpreter::TosState_as_index(states[j]);
+    Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
+    Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
+  }
+
+  { CodeletMark cm(_masm, "continuation entry points");
+    Interpreter::_continuation_entry =
+      EntryPoint(
+        generate_continuation_for(btos),
+        generate_continuation_for(ctos),
+        generate_continuation_for(stos),
+        generate_continuation_for(atos),
+        generate_continuation_for(itos),
+        generate_continuation_for(ltos),
+        generate_continuation_for(ftos),
+        generate_continuation_for(dtos),
+        generate_continuation_for(vtos)
+      );
+  }
+
+  { CodeletMark cm(_masm, "safepoint entry points");
+    Interpreter::_safept_entry =
+      EntryPoint(
+        generate_safept_entry_for(btos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+        generate_safept_entry_for(ctos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+        generate_safept_entry_for(stos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+        generate_safept_entry_for(atos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+        generate_safept_entry_for(itos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+        generate_safept_entry_for(ltos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+        generate_safept_entry_for(ftos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+        generate_safept_entry_for(dtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)),
+        generate_safept_entry_for(vtos, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint))
+      );
+  }
+
+  { CodeletMark cm(_masm, "exception handling");
+    // (Note: this is not safepoint safe because thread may return to compiled code)
+    generate_throw_exception();
+  }
+
+  { CodeletMark cm(_masm, "throw exception entrypoints");
+    Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException");
+    Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
+    Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
+    Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
+    Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
+    Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
+  }
+
+
+
+#define method_entry(kind)                                                                    \
+  { CodeletMark cm(_masm, "method entry point (kind = " #kind ")");                    \
+    Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind);  \
+  }
+
+  // all non-native method kinds
+  method_entry(zerolocals)
+  method_entry(zerolocals_synchronized)
+  method_entry(empty)
+  method_entry(accessor)
+  method_entry(abstract)
+  method_entry(java_lang_math_sin  )
+  method_entry(java_lang_math_cos  )
+  method_entry(java_lang_math_tan  )
+  method_entry(java_lang_math_abs  )
+  method_entry(java_lang_math_sqrt )
+  method_entry(java_lang_math_log  )
+  method_entry(java_lang_math_log10)
+  method_entry(java_lang_math_exp  )
+  method_entry(java_lang_math_pow  )
+  method_entry(java_lang_ref_reference_get)
+
+  initialize_method_handle_entries();
+
+  // all native method kinds (must be one contiguous block)
+  Interpreter::_native_entry_begin = Interpreter::code()->code_end();
+  method_entry(native)
+  method_entry(native_synchronized)
+  Interpreter::_native_entry_end = Interpreter::code()->code_end();
+
+#undef method_entry
+
+  // Bytecodes
+  set_entry_points_for_all_bytes();
+  set_safepoints_for_all_bytes();
+}
+
+//------------------------------------------------------------------------------------------------------------------------
+
+address TemplateInterpreterGenerator::generate_error_exit(const char* msg) {
+  address entry = __ pc();
+  __ stop(msg);
+  return entry;
+}
+
+
+//------------------------------------------------------------------------------------------------------------------------
+
+void TemplateInterpreterGenerator::set_entry_points_for_all_bytes() {
+  for (int i = 0; i < DispatchTable::length; i++) {
+    Bytecodes::Code code = (Bytecodes::Code)i;
+    if (Bytecodes::is_defined(code)) {
+      set_entry_points(code);
+    } else {
+      set_unimplemented(i);
+    }
+  }
+}
+
+
+void TemplateInterpreterGenerator::set_safepoints_for_all_bytes() {
+  for (int i = 0; i < DispatchTable::length; i++) {
+    Bytecodes::Code code = (Bytecodes::Code)i;
+    if (Bytecodes::is_defined(code)) Interpreter::_safept_table.set_entry(code, Interpreter::_safept_entry);
+  }
+}
+
+
+void TemplateInterpreterGenerator::set_unimplemented(int i) {
+  address e = _unimplemented_bytecode;
+  EntryPoint entry(e, e, e, e, e, e, e, e, e);
+  Interpreter::_normal_table.set_entry(i, entry);
+  Interpreter::_wentry_point[i] = _unimplemented_bytecode;
+}
+
+
+void TemplateInterpreterGenerator::set_entry_points(Bytecodes::Code code) {
+  CodeletMark cm(_masm, Bytecodes::name(code), code);
+  // initialize entry points
+  assert(_unimplemented_bytecode    != NULL, "should have been generated before");
+  assert(_illegal_bytecode_sequence != NULL, "should have been generated before");
+  address bep = _illegal_bytecode_sequence;
+  address cep = _illegal_bytecode_sequence;
+  address sep = _illegal_bytecode_sequence;
+  address aep = _illegal_bytecode_sequence;
+  address iep = _illegal_bytecode_sequence;
+  address lep = _illegal_bytecode_sequence;
+  address fep = _illegal_bytecode_sequence;
+  address dep = _illegal_bytecode_sequence;
+  address vep = _unimplemented_bytecode;
+  address wep = _unimplemented_bytecode;
+  // code for short & wide version of bytecode
+  if (Bytecodes::is_defined(code)) {
+    Template* t = TemplateTable::template_for(code);
+    assert(t->is_valid(), "just checking");
+    set_short_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);
+  }
+  if (Bytecodes::wide_is_defined(code)) {
+    Template* t = TemplateTable::template_for_wide(code);
+    assert(t->is_valid(), "just checking");
+    set_wide_entry_point(t, wep);
+  }
+  // set entry points
+  EntryPoint entry(bep, cep, sep, aep, iep, lep, fep, dep, vep);
+  Interpreter::_normal_table.set_entry(code, entry);
+  Interpreter::_wentry_point[code] = wep;
+}
+
+
+void TemplateInterpreterGenerator::set_wide_entry_point(Template* t, address& wep) {
+  assert(t->is_valid(), "template must exist");
+  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions");
+  wep = __ pc(); generate_and_dispatch(t);
+}
+
+
+void TemplateInterpreterGenerator::set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
+  assert(t->is_valid(), "template must exist");
+  switch (t->tos_in()) {
+    case btos:
+    case ctos:
+    case stos:
+      ShouldNotReachHere();  // btos/ctos/stos should use itos.
+      break;
+    case atos: vep = __ pc(); __ pop(atos); aep = __ pc(); generate_and_dispatch(t); break;
+    case itos: vep = __ pc(); __ pop(itos); iep = __ pc(); generate_and_dispatch(t); break;
+    case ltos: vep = __ pc(); __ pop(ltos); lep = __ pc(); generate_and_dispatch(t); break;
+    case ftos: vep = __ pc(); __ pop(ftos); fep = __ pc(); generate_and_dispatch(t); break;
+    case dtos: vep = __ pc(); __ pop(dtos); dep = __ pc(); generate_and_dispatch(t); break;
+    case vtos: set_vtos_entry_points(t, bep, cep, sep, aep, iep, lep, fep, dep, vep);     break;
+    default  : ShouldNotReachHere();                                                 break;
+  }
+}
+
+
+//------------------------------------------------------------------------------------------------------------------------
+
+void TemplateInterpreterGenerator::generate_and_dispatch(Template* t, TosState tos_out) {
+  if (PrintBytecodeHistogram)                                    histogram_bytecode(t);
+#ifndef PRODUCT
+  // debugging code
+  if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) count_bytecode();
+  if (PrintBytecodePairHistogram)                                histogram_bytecode_pair(t);
+  if (TraceBytecodes)                                            trace_bytecode(t);
+  if (StopInterpreterAt > 0)                                     stop_interpreter_at();
+  __ verify_FPU(1, t->tos_in());
+#endif // !PRODUCT
+  int step;
+  if (!t->does_dispatch()) {
+    step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
+    if (tos_out == ilgl) tos_out = t->tos_out();
+    // compute bytecode size
+    assert(step > 0, "just checkin'");
+    // setup stuff for dispatching next bytecode
+    if (ProfileInterpreter && VerifyDataPointer
+        && MethodData::bytecode_has_profile(t->bytecode())) {
+      __ verify_method_data_pointer();
+    }
+    __ dispatch_prolog(tos_out, step);
+  }
+  // generate template
+  t->generate(_masm);
+  // advance
+  if (t->does_dispatch()) {
+#ifdef ASSERT
+    // make sure execution doesn't go beyond this point if code is broken
+    __ should_not_reach_here();
+#endif // ASSERT
+  } else {
+    // dispatch to next bytecode
+    __ dispatch_epilog(tos_out, step);
+  }
+}
+
+//------------------------------------------------------------------------------------------------------------------------
+// Entry points
+
+address TemplateInterpreter::return_entry(TosState state, int length) {
+  guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length");
+  return _return_entry[length].entry(state);
+}
+
+
+address TemplateInterpreter::deopt_entry(TosState state, int length) {
+  guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length");
+  return _deopt_entry[length].entry(state);
+}
+
+//------------------------------------------------------------------------------------------------------------------------
+// Support for invokes
+
+int TemplateInterpreter::TosState_as_index(TosState state) {
+  assert( state < number_of_states , "Invalid state in TosState_as_index");
+  assert(0 <= (int)state && (int)state < TemplateInterpreter::number_of_return_addrs, "index out of bounds");
+  return (int)state;
+}
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Safepoint support
+
+static inline void copy_table(address* from, address* to, int size) {
+  // Copy non-overlapping tables. The copy has to occur word wise for MT safety.
+  while (size-- > 0) *to++ = *from++;
+}
+
+void TemplateInterpreter::notice_safepoints() {
+  if (!_notice_safepoints) {
+    // switch to safepoint dispatch table
+    _notice_safepoints = true;
+    copy_table((address*)&_safept_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
+  }
+}
+
+// switch from the dispatch table which notices safepoints back to the
+// normal dispatch table.  So that we can notice single stepping points,
+// keep the safepoint dispatch table if we are single stepping in JVMTI.
+// Note that the should_post_single_step test is exactly as fast as the
+// JvmtiExport::_enabled test and covers both cases.
+void TemplateInterpreter::ignore_safepoints() {
+  if (_notice_safepoints) {
+    if (!JvmtiExport::should_post_single_step()) {
+      // switch to normal dispatch table
+      _notice_safepoints = false;
+      copy_table((address*)&_normal_table, (address*)&_active_table, sizeof(_active_table) / sizeof(address));
+    }
+  }
+}
+
+//------------------------------------------------------------------------------------------------------------------------
+// Deoptimization support
+
+// If deoptimization happens, this function returns the point of next bytecode to continue execution
+address TemplateInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) {
+  return AbstractInterpreter::deopt_continue_after_entry(method, bcp, callee_parameters, is_top_frame);
+}
+
+// If deoptimization happens, this function returns the point where the interpreter reexecutes
+// the bytecode.
+// Note: Bytecodes::_athrow (C1 only) and Bytecodes::_return are the special cases
+//       that do not return "Interpreter::deopt_entry(vtos, 0)"
+address TemplateInterpreter::deopt_reexecute_entry(Method* method, address bcp) {
+  assert(method->contains(bcp), "just checkin'");
+  Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
+  if (code == Bytecodes::_return) {
+    // This is used for deopt during registration of finalizers
+    // during Object.<init>.  We simply need to resume execution at
+    // the standard return vtos bytecode to pop the frame normally.
+    // reexecuting the real bytecode would cause double registration
+    // of the finalizable object.
+    return _normal_table.entry(Bytecodes::_return).entry(vtos);
+  } else {
+    return AbstractInterpreter::deopt_reexecute_entry(method, bcp);
+  }
+}
+
+// If deoptimization happens, the interpreter should reexecute this bytecode.
+// This function mainly helps the compilers to set up the reexecute bit.
+bool TemplateInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
+  if (code == Bytecodes::_return) {
+    // Yes, we consider Bytecodes::_return as a special case of reexecution
+    return true;
+  } else {
+    return AbstractInterpreter::bytecode_should_reexecute(code);
+  }
+}
+
+#endif // !CC_INTERP
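
Apart from the new PrintMachineCodeToFile hook in TemplateInterpreter::initialize(), this file is re-added unchanged. Its most delicate piece is the safepoint switch: notice_safepoints() and ignore_safepoints() overwrite the active dispatch table word by word, so an interpreter thread racing on reads always observes either a complete old entry or a complete new one, never a torn value. A standalone sketch of that swap:

    #include <cstdio>

    // Stand-ins: an address is one machine word, the table a flat array.
    typedef void* address;
    const int kLength = 256;  // stands in for DispatchTable::length

    static address normal_table[kLength];
    static address safept_table[kLength];
    static address active_table[kLength];
    static bool    notice = false;

    // Word-wise copy, as in copy_table() above: each slot is written with
    // a single word store, so racing readers never see a torn entry.
    static void copy_table(address* from, address* to, int size) {
      while (size-- > 0) *to++ = *from++;
    }

    static void notice_safepoints() {
      if (!notice) { notice = true;  copy_table(safept_table, active_table, kLength); }
    }

    static void ignore_safepoints() {
      if (notice)  { notice = false; copy_table(normal_table, active_table, kLength); }
    }

    int main() {
      notice_safepoints();   // route every bytecode through the safepoint entries
      ignore_safepoints();   // restore normal dispatch
      printf("dispatch table swapped\n");
      return 0;
    }
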
--- a/src/share/vm/prims/jni.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/prims/jni.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -35,9 +35,6 @@
 #ifdef GRAAL
 #include "graal/graalCompiler.hpp"
 #endif
-#ifdef HIGH_LEVEL_INTERPRETER
-#include "graal/graalVMToInterpreter.hpp"
-#endif
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #endif // SERIALGC
@@ -1345,18 +1342,6 @@
   // Initialize result type (must be done after args->iterate())
   result->set_type(args->get_ret_type());
 
-#ifdef HIGH_LEVEL_INTERPRETER
-  // TODO (chaeubl): this is quite a hack. The launcher should take care of that instead.
-  bool invoked_main_method = false;
-  if (HighLevelInterpreterClass != NULL && first_time_InvokeMain && method->name() == vmSymbols::main_name() && method->result_type() == T_VOID) {
-    assert(THREAD->is_Java_thread(), "other threads must not call into java");
-    JavaThread* thread = (JavaThread*)THREAD;
-    first_time_InvokeMain = false;
-    invoked_main_method = true;
-    thread->set_high_level_interpreter_in_vm(true);
-  }
-#endif
-
   JavaCalls::call(result, method, &java_args, CHECK);
 
   // Convert result
@@ -5172,16 +5157,6 @@
       compiler->initialize();
 #endif
 
-#ifdef HIGH_LEVEL_INTERPRETER
-      if (HighLevelInterpreterClass != NULL) {
-        bool result = VMToInterpreter::allocate_interpreter(HighLevelInterpreterClass, HighLevelInterpreterArguments, thread);
-        if (!result) {
-          vm_abort(false);
-          return JNI_ERR;
-        }
-      }
-#endif
-
     // Tracks the time application was running before GC
     RuntimeService::record_application_start();
 
--- a/src/share/vm/runtime/arguments.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/runtime/arguments.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -2132,19 +2132,24 @@
 
 // Parse JavaVMInitArgs structure
 #ifdef GRAAL
+static void prepend_to_graal_classpath(SysClassPath &cp, const char* path) {
+  cp.add_prefix(path);
+}
+
 static void prepend_to_graal_classpath(SysClassPath &cp, const char* graal_dir, const char* project) {
   const int BUFFER_SIZE = 1024;
   char path[BUFFER_SIZE];
 
   const char fileSep = *os::file_separator();
   sprintf(path, "%s%c%s%cbin", graal_dir, fileSep, project, fileSep);
+  
   DIR* dir = os::opendir(path);
   if (dir == NULL) {
     jio_fprintf(defaultStream::output_stream(), "Error while starting Graal VM: The Graal class directory %s could not be opened.\n", path);
     vm_exit(1);
   }
   os::closedir(dir);
-  cp.add_prefix(path);
+  prepend_to_graal_classpath(cp, path);
 }
 
 // Walk up the directory hierarchy starting from JAVA_HOME looking
@@ -2209,59 +2214,66 @@
     if (PrintVMOptions) {
       tty->print_cr("Running Graal VM... ");
     }
-    const int BUFFER_SIZE = 1024;
-    char graal_dir[BUFFER_SIZE];
-    if (!os::getenv("GRAAL", graal_dir, sizeof(graal_dir))) {
-      if (find_graal_dir(graal_dir) == false) {
-        jio_fprintf(defaultStream::output_stream(), "Error while starting Graal VM: The GRAAL environment variable needs to point to the directory containing the Graal projects.\n");
-        vm_exit(0);
+
+    SysClassPath scp_compiler("");
+
+    if (GraalClassPath != NULL) {
+      prepend_to_graal_classpath(scp_compiler, GraalClassPath);
+    } else {
+      const int BUFFER_SIZE = 1024;
+      char graal_dir[BUFFER_SIZE];
+      if (!os::getenv("GRAAL", graal_dir, sizeof(graal_dir))) {
+        if (find_graal_dir(graal_dir) == false) {
+          jio_fprintf(defaultStream::output_stream(), "Error while starting Graal VM: The GRAAL environment variable needs to point to the directory containing the Graal projects.\n");
+          vm_exit(0);
+        }
+      }
+      if (PrintVMOptions) tty->print_cr("GRAAL=%s", graal_dir);
+    
+      // this declaration is checked for correctness by 'mx build' - only
+      // modify its entries, not its name or shape
+      const char* graal_projects[] = {
+  #ifdef AMD64
+          "com.oracle.graal.amd64",
+          "com.oracle.graal.asm.amd64",
+          "com.oracle.graal.lir.amd64",
+          "com.oracle.graal.compiler.amd64",
+          "com.oracle.graal.hotspot.amd64",
+  #endif
+          "com.oracle.graal.api.runtime",
+          "com.oracle.graal.api.meta",
+          "com.oracle.graal.api.code",
+          "com.oracle.graal.api.interpreter",
+          "com.oracle.graal.hotspot",
+          "com.oracle.graal.asm",
+          "com.oracle.graal.alloc",
+          "com.oracle.graal.snippets",
+          "com.oracle.graal.compiler",
+          "com.oracle.graal.loop",
+          "com.oracle.graal.phases",
+          "com.oracle.graal.phases.common",
+          "com.oracle.graal.virtual",
+          "com.oracle.graal.nodes",
+          "com.oracle.graal.printer",
+          "com.oracle.graal.debug",
+          "com.oracle.graal.graph",
+          "com.oracle.graal.lir",
+          "com.oracle.graal.bytecode",
+          "com.oracle.graal.java"
+      };
+
+      const int len = sizeof(graal_projects) / sizeof(char*);
+      for (int i = 0; i < len; i++) {
+        if (PrintVMOptions) {
+          tty->print_cr("Adding project directory %s to bootclasspath", graal_projects[i]);
+        }
+        prepend_to_graal_classpath(scp_compiler, graal_dir, graal_projects[i]);
       }
     }
-    if (PrintVMOptions) tty->print_cr("GRAAL=%s", graal_dir);
-    
-    // this declaration is checked for correctness by 'mx build' - only
-    // modify its entries, not its name or shape
-    const char* graal_projects[] = {
-#ifdef AMD64
-        "com.oracle.graal.amd64",
-        "com.oracle.graal.asm.amd64",
-        "com.oracle.graal.lir.amd64",
-        "com.oracle.graal.compiler.amd64",
-        "com.oracle.graal.hotspot.amd64",
-#endif
-        "com.oracle.graal.api.runtime",
-        "com.oracle.graal.api.meta",
-        "com.oracle.graal.api.code",
-        "com.oracle.graal.api.interpreter",
-        "com.oracle.graal.hotspot",
-        "com.oracle.graal.asm",
-        "com.oracle.graal.alloc",
-        "com.oracle.graal.snippets",
-        "com.oracle.graal.compiler",
-        "com.oracle.graal.loop",
-        "com.oracle.graal.phases",
-        "com.oracle.graal.phases.common",
-        "com.oracle.graal.virtual",
-        "com.oracle.graal.nodes",
-        "com.oracle.graal.printer",
-        "com.oracle.graal.debug",
-        "com.oracle.graal.graph",
-        "com.oracle.graal.lir",
-        "com.oracle.graal.bytecode",
-        "com.oracle.graal.java"
-    };
-
-    SysClassPath scp_compiler("");
-    const int len = sizeof(graal_projects) / sizeof(char*);
-    for (int i = 0; i < len; i++) {
-      if (PrintVMOptions) {
-        tty->print_cr("Adding project directory %s to bootclasspath", graal_projects[i]);
-      }
-      prepend_to_graal_classpath(scp_compiler, graal_dir, graal_projects[i]);
-    }
+
     scp_compiler.expand_endorsed();
-
     Arguments::set_compilerclasspath(scp_compiler.combined_path());
+
 #endif
 
   if (AggressiveOpts) {
--- a/src/share/vm/runtime/compilationPolicy.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/runtime/compilationPolicy.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -472,7 +472,15 @@
           }
         }
       }
+
       if (!m->queued_for_compilation()) {
+        if (TraceCompilationPolicy) {
+          tty->print("method invocation trigger: ");
+          m->print_short_name(tty);
+          tty->print_cr(" ( interpreted " INTPTR_FORMAT ", size=%d, hotCount=%d, hotTime=" UINT64_FORMAT " ) ", (address)m(), m->code_size(), hot_count, hot_time);
+        }
+
+        assert(m->is_native() || m->method_data() != NULL, "only compile methods that are native or have profiling data");
         CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier, m, hot_count, "count", thread);
       }
     }
--- a/src/share/vm/runtime/globals.hpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/runtime/globals.hpp	Tue Nov 27 12:12:02 2012 +0100
@@ -623,15 +623,6 @@
   develop(bool, InlineClassNatives, true,                                   \
           "inline Class.isInstance, etc")                                   \
                                                                             \
-  product(ccstr, HighLevelInterpreterClass, NULL,                           \
-          "fully qualified class name of the high-level interpreter")       \
-                                                                            \
-  product(ccstr, HighLevelInterpreterArguments, NULL,                       \
-          "arguments that are passed to the high-level interpreter")        \
-                                                                            \
-  notproduct(bool, PrintHighLevelInterpreterVMTransitions, false,           \
-          "print transitions between VM and high-level interpreter")        \
-                                                                            \
   develop(bool, InlineThreadNatives, true,                                  \
           "inline Thread.currentThread, etc")                               \
                                                                             \
@@ -2613,6 +2604,9 @@
   diagnostic(bool, PrintInterpreter, false,                                 \
           "Prints the generated interpreter code")                          \
                                                                             \
+  product(bool, PrintMachineCodeToFile, false,                              \
+          "Prints generated machine code to a file (interp + compiled)")    \
+                                                                            \
   product(bool, UseInterpreter, true,                                       \
           "Use interpreter for non-compiled methods")                       \
                                                                             \
--- a/src/share/vm/runtime/init.cpp	Mon Nov 26 19:30:54 2012 -0800
+++ b/src/share/vm/runtime/init.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -34,6 +34,7 @@
 #include "runtime/init.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "utilities/machineCodePrinter.hpp"
 
 // Initialization done by VM thread in vm_init_globals()
 void check_ThreadShadow();
@@ -86,6 +87,10 @@
   mutex_init();
   chunkpool_init();
   perfMemory_init();
+
+  if (PrintMachineCodeToFile) {
+    MachineCodePrinter::initialize();
+  }
 }
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/machineCodePrinter.cpp	Tue Nov 27 12:12:02 2012 +0100
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "utilities/machineCodePrinter.hpp"
+#include "utilities/ostream.hpp"
+
+fileStream* MachineCodePrinter::_st = NULL;
+volatile int MachineCodePrinter::_write_lock = 0;
+
+// Called once during VM startup (see init.cpp) when PrintMachineCodeToFile
+// is set; subsequent print calls append to this stream.
+void MachineCodePrinter::initialize() {
+  _st = new (ResourceObj::C_HEAP, mtInternal) fileStream("machineCode.txt");
+}
+
+void MachineCodePrinter::print(nmethod* nm) {
+  lock();
+  Disassembler::decode(nm, _st);
+  unlock();
+}
+
+void MachineCodePrinter::print(CodeBlob* cb) {
+  lock();
+  Disassembler::decode(cb, _st);
+  unlock();
+}
+
+void MachineCodePrinter::print(StubQueue* stub_queue) {
+  lock();
+  stub_queue->print_on(_st);
+  unlock();
+}
+
+void MachineCodePrinter::flush() {
+  _st->flush();
+}
+
+// A bare spin lock serializes writers, since several compiler threads may
+// disassemble code concurrently.
+void MachineCodePrinter::lock() {
+  Thread::SpinAcquire(&_write_lock, "MachineCodePrinter");
+}
+
+void MachineCodePrinter::unlock() {
+  Thread::SpinRelease(&_write_lock);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/machineCodePrinter.hpp	Tue Nov 27 12:12:02 2012 +0100
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_VM_UTILITIES_MACHINE_CODE_PRINTER_HPP
+#define SHARE_VM_UTILITIES_MACHINE_CODE_PRINTER_HPP
+
+#include "memory/allocation.hpp"
+
+class CodeBlob;
+class StubQueue;
+class fileStream;
+class nmethod;
+
+// Dumps disassembled machine code for nmethods, code blobs and stubs to a
+// single file ("machineCode.txt"), serialized by a spin lock.
+class MachineCodePrinter : public AllStatic {
+private:
+  static fileStream* _st;
+  static volatile int _write_lock;
+
+public:
+  static void initialize();
+  static void print(nmethod* nm);
+  static void print(CodeBlob* cb);
+  static void print(StubQueue* stub_queue);
+  static void flush();
+
+private:
+  static void lock();
+  static void unlock();
+};
+
+#endif // SHARE_VM_UTILITIES_MACHINE_CODE_PRINTER_HPP