changeset 9718:6623dda5fabb

Merge.
author Doug Simon <doug.simon@oracle.com>
date Wed, 15 May 2013 14:53:34 +0200
parents f8e0bf2c70e2 (current diff) e0b95acd24ae (diff)
children f6b1694360ec
diffstat 48 files changed, 4616 insertions(+), 3770 deletions(-)
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Assumptions.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Assumptions.java	Wed May 15 14:53:34 2013 +0200
@@ -362,4 +362,9 @@
         count++;
     }
 
+    /**
+     * Records all assumptions in {@code assumptions} into this object.
+     */
+    public void record(Assumptions assumptions) {
+        for (int i = 0; i < assumptions.count; i++) {
+            record(assumptions.list[i]);
+        }
+    }
 }
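The new record(Assumptions) overload bulk-copies every entry from another Assumptions object, e.g. to fold the assumptions made while compiling an inlined callee into the caller's set. A minimal usage sketch, assuming the boolean constructor that enables assumption recording (the variable names are illustrative, not from this changeset):

    Assumptions caller = new Assumptions(true);
    Assumptions callee = new Assumptions(true);
    // ... assumptions are recorded into callee while its method is compiled ...
    caller.record(callee); // copies all of callee's recorded entries into caller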
--- a/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/ResolvedJavaMethod.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/ResolvedJavaMethod.java	Wed May 15 14:53:34 2013 +0200
@@ -56,13 +56,6 @@
     int getCompiledCodeSize();
 
     /**
-     * Returns an estimate how complex it is to compile this method.
-     * 
-     * @return A value >= 0, where higher means more complex.
-     */
-    int getCompilationComplexity();
-
-    /**
      * Returns the {@link ResolvedJavaType} object representing the class or interface that declares
      * this method.
      */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/AbstractSPARCAssembler.java	Wed May 15 14:53:34 2013 +0200
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.sparc;
+
+import com.oracle.graal.api.code.AbstractAddress;
+import com.oracle.graal.api.code.Register;
+import com.oracle.graal.api.code.TargetDescription;
+import com.oracle.graal.asm.AbstractAssembler;
+import com.oracle.graal.asm.Label;
+
+public abstract class AbstractSPARCAssembler extends AbstractAssembler {
+
+    public AbstractSPARCAssembler(TargetDescription target) {
+        super(target);
+    }
+
+    @Override
+    public void align(int modulus) {
+        // SPARC: Implement alignment.
+    }
+
+    @Override
+    public void jmp(Label l) {
+        // SPARC: Implement jump.
+    }
+
+    @Override
+    protected void patchJumpTarget(int branch, int jumpTarget) {
+        // SPARC: Implement patching of jump target.
+    }
+
+    @Override
+    public AbstractAddress makeAddress(Register base, int displacement) {
+        // SPARC: Implement address calculation.
+        return null;
+    }
+
+    @Override
+    public AbstractAddress getPlaceholder() {
+        // SPARC: Implement address patching.
+        return null;
+    }
+}
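Every override in this new base class is still a stub. As an illustration only, a hedged sketch of how align(int) might later be filled in, padding with SPARC nop words (0x01000000, the encoding of sethi 0, %g0) until the buffer reaches the requested boundary; codeBuffer.position() is assumed to be the position accessor inherited from AbstractAssembler:

    @Override
    public void align(int modulus) {
        while (codeBuffer.position() % modulus != 0) {
            emitInt(0x01000000); // nop == sethi 0, %g0
        }
    }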
--- a/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAssembler.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAssembler.java	Wed May 15 14:53:34 2013 +0200
@@ -23,13 +23,96 @@
 package com.oracle.graal.asm.sparc;
 
 import com.oracle.graal.api.code.*;
-import com.oracle.graal.asm.*;
 import com.oracle.graal.sparc.*;
 
 /**
  * This class implements an assembler that can encode most SPARC instructions.
  */
-public class SPARCAssembler extends AbstractAssembler {
+public class SPARCAssembler extends AbstractSPARCAssembler {
+
+    // The i bit (bit 13): selects the immediate (simm13) form of a format-3 instruction.
+    public static final int ImmedTrue = 0x00002000;
+
+    // Selectors for the op field (bits 30-31) of an instruction word.
+    public enum Ops {
+        CallOp(0x40000000),
+        BranchOp(0x00000000),
+        ArithOp(0x80000000),
+        LdstOp(0xC0000000);
+
+        private final int value;
+
+        private Ops(int value) {
+            this.value = value;
+        }
+
+        public int getValue() {
+            return value;
+        }
+    }
+
+    // Selectors for the op3 field (bits 19-24) of a format-3 instruction.
+    public enum Op3s {
+        Add((0x00 << 19) & 0x01F80000, "add"),
+        And((0x01 << 19) & 0x01F80000, "and"),
+        Or((0x02 << 19) & 0x01F80000, "or"),
+        Xor((0x03 << 19) & 0x01F80000, "xor"),
+        Sub((0x04 << 19) & 0x01F80000, "sub"),
+        Andn((0x05 << 19) & 0x01F80000, "andn"),
+        Orn((0x06 << 19) & 0x01F80000, "orn"),
+        Xnor((0x07 << 19) & 0x01F80000, "xnor"),
+        Addc((0x08 << 19) & 0x01F80000, "addc"),
+        Mulx((0x09 << 19) & 0x01F80000, "mulx"),
+        Umul((0x0A << 19) & 0x01F80000, "umul"),
+        Smul((0x0B << 19) & 0x01F80000, "smul"),
+        Subc((0x0C << 19) & 0x01F80000, "subc"),
+        Udivx((0x0D << 19) & 0x01F80000, "udivx"),
+        Udiv((0x0E << 19) & 0x01F80000, "udiv"),
+        Sdiv((0x0F << 19) & 0x01F80000, "sdiv"),
+
+        Addcc((0x10 << 19) & 0x01F80000, "addcc"),
+        Andcc((0x11 << 19) & 0x01F80000, "andcc"),
+        Orcc((0x12 << 19) & 0x01F80000, "orcc"),
+        Xorcc((0x13 << 19) & 0x01F80000, "xorcc"),
+        Subcc((0x14 << 19) & 0x01F80000, "subcc"),
+        Andncc((0x15 << 19) & 0x01F80000, "andncc"),
+        Orncc((0x16 << 19) & 0x01F80000, "orncc"),
+        Xnorcc((0x17 << 19) & 0x01F80000, "xnorcc"),
+        Addccc((0x18 << 19) & 0x01F80000, "addccc"),
+        Mulxcc((0x19 << 19) & 0x01F80000, "mulxcc"),
+        Umulcc((0x1A << 19) & 0x01F80000, "umulcc"),
+        Smulcc((0x1B << 19) & 0x01F80000, "smulcc"),
+        Subccc((0x1C << 19) & 0x01F80000, "subccc"),
+        Udivcc((0x1E << 19) & 0x01F80000, "udivcc"),
+        Sdivcc((0x1F << 19) & 0x01F80000, "sdivcc"),
+
+        Taddcc((0x20 << 19) & 0x01F80000, "taddcc"),
+        Tsubcc((0x21 << 19) & 0x01F80000, "tsubcc"),
+        Taddcctv((0x22 << 19) & 0x01F80000, "taddcctv"),
+        Tsubcctv((0x23 << 19) & 0x01F80000, "tsubcctv"),
+        Mulscc((0x24 << 19) & 0x01F80000, "mulscc"),
+        Sll((0x25 << 19) & 0x01F80000, "sll"),
+        Sllx((0x25 << 19) & 0x01F80000, "sllx"),
+        Srl((0x26 << 19) & 0x01F80000, "srl"),
+        Srlx((0x26 << 19) & 0x01F80000, "srlx"),
+        Sra((0x27 << 19) & 0x01F80000, "sra"),
+        Srax((0x27 << 19) & 0x01F80000, "srax"),
+        Rdreg((0x28 << 19) & 0x01F80000, "rdreg"),
+        Membar((0x28 << 19) & 0x01F80000, "membar");
+
+        private final int value;
+        private final String operator;
+
+        private Op3s(int value, String op) {
+            this.value = value;
+            this.operator = op;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public String getOperator() {
+            return operator;
+        }
+    }
 
     @SuppressWarnings("unused")
     public SPARCAssembler(TargetDescription target) {
@@ -38,30 +121,391 @@
         SPARC sparc;
     }
 
-    @Override
-    public void align(int modulus) {
-        // SPARC: Implement alignment.
+    // Field placement in a format-3 instruction word:
+    // op [31:30], rd [29:25], op3 [24:19], rs1 [18:14], i [13], simm13 [12:0] or rs2 [4:0].
+    public static final int rs1(int val) {
+        return val << 14;
+    }
+
+    public static final int rs2(int val) {
+        return val;
+    }
+
+    public static final int rd(int val) {
+        return val << 25;
+    }
+
+    // The X bit (bit 12) selecting the 64-bit variant of the shift instructions.
+    public static final int sx1 = 0x00001000;
+
+    public static final int simm(int x, int nbits) {
+        assert x >= -(1 << (nbits - 1)) && x < (1 << (nbits - 1)) : "signed immediate out of range";
+        return x & ((1 << nbits) - 1);
+    }
+
+    public final void add(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Add.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void add(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Add.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void addcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Addcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void addcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Addcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void addc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Addc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void addc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Addc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void addccc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Addccc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void addccc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Addccc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void and(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.And.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void and(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.And.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void andcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Andcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void andcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Andcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void andn(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Andn.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void andn(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Andn.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void andncc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Andncc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void andncc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Andncc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void mulscc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Mulscc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void mulscc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Mulscc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
     }
 
-    @Override
-    public void jmp(Label l) {
-        // SPARC: Implement jump.
+    public final void mulx(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Mulx.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void mulx(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Mulx.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void or(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Or.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void or(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Or.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void orcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Orcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void orcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Orcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void orn(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Orn.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void orn(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Orn.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void orncc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Orncc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void orncc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Orncc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    // A.44 Read State Register
+
+    public final void rdy(Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Rdreg.getValue() | rd(dst.encoding()));
+    }
+
+    public final void rdccr(Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Rdreg.getValue() | rd(dst.encoding()) | 0x00008000);
+    }
+
+    public final void rdasi(Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Rdreg.getValue() | rd(dst.encoding()) | 0x0000C000);
+    }
+
+    public final void rdtick(Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Rdreg.getValue() | rd(dst.encoding()) | 0x00010000);
+    }
+
+    public final void rdpc(Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Rdreg.getValue() | rd(dst.encoding()) | 0x00014000);
+    }
+
+    public final void rdfprs(Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Rdreg.getValue() | rd(dst.encoding()) | 0x00018000);
+    }
+
+    @Deprecated
+    public final void sdiv(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sdiv.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    @Deprecated
+    public final void sdiv(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sdiv.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    @Deprecated
+    public final void sdivcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sdivcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    @Deprecated
+    public final void sdivcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sdivcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void sll(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sll.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void sll(Register src1, int imm5a, Register dst) {
+        assert imm5a < 0x20; // 32-bit shift count is 5 bits
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sll.getValue() | rs1(src1.encoding()) | ImmedTrue | imm5a | rd(dst.encoding()));
+    }
+
+    public final void sllx(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sllx.getValue() | sx1 | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
     }
 
-    @Override
-    protected void patchJumpTarget(int branch, int jumpTarget) {
-        // SPARC: Implement patching of jump target.
+    public final void sllx(Register src1, int imm5a, Register dst) {
+        assert imm5a < 0x40;
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sllx.getValue() | sx1 | rs1(src1.encoding()) | ImmedTrue | imm5a | rd(dst.encoding()));
+    }
+
+    public final void smul(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Smul.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void smul(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Smul.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void smulcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Smulcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void smulcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Smulcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void sra(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sra.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void sra(Register src1, int imm5a, Register dst) {
+        assert imm5a < 0x20; // 32-bit shift count is 5 bits
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sra.getValue() | rs1(src1.encoding()) | ImmedTrue | imm5a | rd(dst.encoding()));
+    }
+
+    public final void srax(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Srax.getValue() | sx1 | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void srax(Register src1, int imm5a, Register dst) {
+        assert imm5a < 0x40;
+        emitInt(Ops.ArithOp.getValue() | Op3s.Srax.getValue() | sx1 | rs1(src1.encoding()) | ImmedTrue | imm5a | rd(dst.encoding()));
+    }
+
+    public final void srl(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Srl.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void srl(Register src1, int imm5a, Register dst) {
+        assert imm5a < 0x20; // 32-bit shift count is 5 bits
+        emitInt(Ops.ArithOp.getValue() | Op3s.Srl.getValue() | rs1(src1.encoding()) | ImmedTrue | imm5a | rd(dst.encoding()));
+    }
+
+    public final void srlx(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Srlx.getValue() | sx1 | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void srlx(Register src1, int imm5a, Register dst) {
+        assert imm5a < 0x40;
+        emitInt(Ops.ArithOp.getValue() | Op3s.Srlx.getValue() | sx1 | rs1(src1.encoding()) | ImmedTrue | imm5a | rd(dst.encoding()));
+    }
+
+    public final void sub(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sub.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void sub(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Sub.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void subcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Subcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void subcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Subcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void subc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Subc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void subc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Subc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void subccc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Subccc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void subccc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Subccc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void taddcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Taddcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void taddcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Taddcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
     }
 
-    @Override
-    public AbstractAddress makeAddress(Register base, int displacement) {
-        // SPARC: Implement address calculation.
-        return null;
+    public final void taddcctv(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Taddcctv.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void taddcctv(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Taddcctv.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void tsubcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Tsubcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void tsubcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Tsubcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void tsubcctv(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Tsubcctv.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void tsubcctv(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Tsubcctv.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    @Deprecated
+    public final void udiv(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Udiv.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    @Deprecated
+    public final void udiv(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Udiv.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    @Deprecated
+    public final void udivcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Udivcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    @Deprecated
+    public final void udivcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Udivcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void udivx(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Udivx.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
     }
 
-    @Override
-    public AbstractAddress getPlaceholder() {
-        // SPARC: Implement address patching.
-        return null;
+    public final void udivx(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Udivx.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void umul(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Umul.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void umul(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Umul.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void umulcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Umulcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void umulcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Umulcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void xor(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Xor.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void xor(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Xor.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
     }
+
+    public final void xorcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Xorcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void xorcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Xorcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void xnor(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Xnor.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void xnor(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Xnor.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
+    public final void xnorcc(Register src1, Register src2, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Xnorcc.getValue() | rs1(src1.encoding()) | rs2(src2.encoding()) | rd(dst.encoding()));
+    }
+
+    public final void xnorcc(Register src1, int simm13, Register dst) {
+        emitInt(Ops.ArithOp.getValue() | Op3s.Xnorcc.getValue() | rs1(src1.encoding()) | ImmedTrue | simm(simm13, 13) | rd(dst.encoding()));
+    }
+
 }
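Each emitter above ORs the op field, the op3 selector, and the register or immediate fields into a single 32-bit instruction word. A worked example (a sketch for illustration, not part of the changeset), encoding add %g1, %g2, %g3 with the field helpers defined earlier in the class:

    int word = Ops.ArithOp.getValue() // 0x80000000: op = 2, format-3 arithmetic
             | Op3s.Add.getValue()    // op3 = 0x00: add
             | rd(3)                  // %g3 into bits 25-29
             | rs1(1)                 // %g1 into bits 14-18
             | rs2(2);                // %g2 into bits 0-4, i bit clear
    // word == 0x86004002, the SPARC encoding of: add %g1, %g2, %g3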
--- a/graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/Bytecodes.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/Bytecodes.java	Wed May 15 14:53:34 2013 +0200
@@ -357,218 +357,212 @@
      */
     private static final int[] lengthArray = new int[256];
 
-    /**
-     * An array that maps from a bytecode value to the estimated complexity of the bytecode in terms
-     * of generated machine code.
-     */
-    private static final int[] compilationComplexityArray = new int[256];
-
     // Checkstyle: stop
     // @formatter:off
     static {
-        def(NOP                 , "nop"             , "b"    , 0);
-        def(ACONST_NULL         , "aconst_null"     , "b"    , 0);
-        def(ICONST_M1           , "iconst_m1"       , "b"    , 0);
-        def(ICONST_0            , "iconst_0"        , "b"    , 0);
-        def(ICONST_1            , "iconst_1"        , "b"    , 0);
-        def(ICONST_2            , "iconst_2"        , "b"    , 0);
-        def(ICONST_3            , "iconst_3"        , "b"    , 0);
-        def(ICONST_4            , "iconst_4"        , "b"    , 0);
-        def(ICONST_5            , "iconst_5"        , "b"    , 0);
-        def(LCONST_0            , "lconst_0"        , "b"    , 0);
-        def(LCONST_1            , "lconst_1"        , "b"    , 0);
-        def(FCONST_0            , "fconst_0"        , "b"    , 0);
-        def(FCONST_1            , "fconst_1"        , "b"    , 0);
-        def(FCONST_2            , "fconst_2"        , "b"    , 0);
-        def(DCONST_0            , "dconst_0"        , "b"    , 0);
-        def(DCONST_1            , "dconst_1"        , "b"    , 0);
-        def(BIPUSH              , "bipush"          , "bc"   , 0);
-        def(SIPUSH              , "sipush"          , "bcc"  , 0);
-        def(LDC                 , "ldc"             , "bi"   , 0, TRAP);
-        def(LDC_W               , "ldc_w"           , "bii"  , 0, TRAP);
-        def(LDC2_W              , "ldc2_w"          , "bii"  , 0, TRAP);
-        def(ILOAD               , "iload"           , "bi"   , 0, LOAD);
-        def(LLOAD               , "lload"           , "bi"   , 0, LOAD);
-        def(FLOAD               , "fload"           , "bi"   , 0, LOAD);
-        def(DLOAD               , "dload"           , "bi"   , 0, LOAD);
-        def(ALOAD               , "aload"           , "bi"   , 0, LOAD);
-        def(ILOAD_0             , "iload_0"         , "b"    , 0, LOAD);
-        def(ILOAD_1             , "iload_1"         , "b"    , 0, LOAD);
-        def(ILOAD_2             , "iload_2"         , "b"    , 0, LOAD);
-        def(ILOAD_3             , "iload_3"         , "b"    , 0, LOAD);
-        def(LLOAD_0             , "lload_0"         , "b"    , 0, LOAD);
-        def(LLOAD_1             , "lload_1"         , "b"    , 0, LOAD);
-        def(LLOAD_2             , "lload_2"         , "b"    , 0, LOAD);
-        def(LLOAD_3             , "lload_3"         , "b"    , 0, LOAD);
-        def(FLOAD_0             , "fload_0"         , "b"    , 0, LOAD);
-        def(FLOAD_1             , "fload_1"         , "b"    , 0, LOAD);
-        def(FLOAD_2             , "fload_2"         , "b"    , 0, LOAD);
-        def(FLOAD_3             , "fload_3"         , "b"    , 0, LOAD);
-        def(DLOAD_0             , "dload_0"         , "b"    , 0, LOAD);
-        def(DLOAD_1             , "dload_1"         , "b"    , 0, LOAD);
-        def(DLOAD_2             , "dload_2"         , "b"    , 0, LOAD);
-        def(DLOAD_3             , "dload_3"         , "b"    , 0, LOAD);
-        def(ALOAD_0             , "aload_0"         , "b"    , 0, LOAD);
-        def(ALOAD_1             , "aload_1"         , "b"    , 0, LOAD);
-        def(ALOAD_2             , "aload_2"         , "b"    , 0, LOAD);
-        def(ALOAD_3             , "aload_3"         , "b"    , 0, LOAD);
-        def(IALOAD              , "iaload"          , "b"    , 0, TRAP);
-        def(LALOAD              , "laload"          , "b"    , 0, TRAP);
-        def(FALOAD              , "faload"          , "b"    , 0, TRAP);
-        def(DALOAD              , "daload"          , "b"    , 0, TRAP);
-        def(AALOAD              , "aaload"          , "b"    , 0, TRAP);
-        def(BALOAD              , "baload"          , "b"    , 0, TRAP);
-        def(CALOAD              , "caload"          , "b"    , 0, TRAP);
-        def(SALOAD              , "saload"          , "b"    , 0, TRAP);
-        def(ISTORE              , "istore"          , "bi"   , 0, STORE);
-        def(LSTORE              , "lstore"          , "bi"   , 0, STORE);
-        def(FSTORE              , "fstore"          , "bi"   , 0, STORE);
-        def(DSTORE              , "dstore"          , "bi"   , 0, STORE);
-        def(ASTORE              , "astore"          , "bi"   , 0, STORE);
-        def(ISTORE_0            , "istore_0"        , "b"    , 0, STORE);
-        def(ISTORE_1            , "istore_1"        , "b"    , 0, STORE);
-        def(ISTORE_2            , "istore_2"        , "b"    , 0, STORE);
-        def(ISTORE_3            , "istore_3"        , "b"    , 0, STORE);
-        def(LSTORE_0            , "lstore_0"        , "b"    , 0, STORE);
-        def(LSTORE_1            , "lstore_1"        , "b"    , 0, STORE);
-        def(LSTORE_2            , "lstore_2"        , "b"    , 0, STORE);
-        def(LSTORE_3            , "lstore_3"        , "b"    , 0, STORE);
-        def(FSTORE_0            , "fstore_0"        , "b"    , 0, STORE);
-        def(FSTORE_1            , "fstore_1"        , "b"    , 0, STORE);
-        def(FSTORE_2            , "fstore_2"        , "b"    , 0, STORE);
-        def(FSTORE_3            , "fstore_3"        , "b"    , 0, STORE);
-        def(DSTORE_0            , "dstore_0"        , "b"    , 0, STORE);
-        def(DSTORE_1            , "dstore_1"        , "b"    , 0, STORE);
-        def(DSTORE_2            , "dstore_2"        , "b"    , 0, STORE);
-        def(DSTORE_3            , "dstore_3"        , "b"    , 0, STORE);
-        def(ASTORE_0            , "astore_0"        , "b"    , 0, STORE);
-        def(ASTORE_1            , "astore_1"        , "b"    , 0, STORE);
-        def(ASTORE_2            , "astore_2"        , "b"    , 0, STORE);
-        def(ASTORE_3            , "astore_3"        , "b"    , 0, STORE);
-        def(IASTORE             , "iastore"         , "b"    , 3, TRAP);
-        def(LASTORE             , "lastore"         , "b"    , 3, TRAP);
-        def(FASTORE             , "fastore"         , "b"    , 3, TRAP);
-        def(DASTORE             , "dastore"         , "b"    , 3, TRAP);
-        def(AASTORE             , "aastore"         , "b"    , 4, TRAP);
-        def(BASTORE             , "bastore"         , "b"    , 3, TRAP);
-        def(CASTORE             , "castore"         , "b"    , 3, TRAP);
-        def(SASTORE             , "sastore"         , "b"    , 3, TRAP);
-        def(POP                 , "pop"             , "b"    , 0);
-        def(POP2                , "pop2"            , "b"    , 0);
-        def(DUP                 , "dup"             , "b"    , 0);
-        def(DUP_X1              , "dup_x1"          , "b"    , 0);
-        def(DUP_X2              , "dup_x2"          , "b"    , 0);
-        def(DUP2                , "dup2"            , "b"    , 0);
-        def(DUP2_X1             , "dup2_x1"         , "b"    , 0);
-        def(DUP2_X2             , "dup2_x2"         , "b"    , 0);
-        def(SWAP                , "swap"            , "b"    , 0);
-        def(IADD                , "iadd"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(LADD                , "ladd"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(FADD                , "fadd"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(DADD                , "dadd"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(ISUB                , "isub"            , "b"    , 1);
-        def(LSUB                , "lsub"            , "b"    , 1);
-        def(FSUB                , "fsub"            , "b"    , 1);
-        def(DSUB                , "dsub"            , "b"    , 1);
-        def(IMUL                , "imul"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(LMUL                , "lmul"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(FMUL                , "fmul"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(DMUL                , "dmul"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(IDIV                , "idiv"            , "b"    , 1, TRAP);
-        def(LDIV                , "ldiv"            , "b"    , 1, TRAP);
-        def(FDIV                , "fdiv"            , "b"    , 1);
-        def(DDIV                , "ddiv"            , "b"    , 1);
-        def(IREM                , "irem"            , "b"    , 1, TRAP);
-        def(LREM                , "lrem"            , "b"    , 1, TRAP);
-        def(FREM                , "frem"            , "b"    , 1);
-        def(DREM                , "drem"            , "b"    , 1);
-        def(INEG                , "ineg"            , "b"    , 1);
-        def(LNEG                , "lneg"            , "b"    , 1);
-        def(FNEG                , "fneg"            , "b"    , 1);
-        def(DNEG                , "dneg"            , "b"    , 1);
-        def(ISHL                , "ishl"            , "b"    , 1);
-        def(LSHL                , "lshl"            , "b"    , 1);
-        def(ISHR                , "ishr"            , "b"    , 1);
-        def(LSHR                , "lshr"            , "b"    , 1);
-        def(IUSHR               , "iushr"           , "b"    , 1);
-        def(LUSHR               , "lushr"           , "b"    , 1);
-        def(IAND                , "iand"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(LAND                , "land"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(IOR                 , "ior"             , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(LOR                 , "lor"             , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(IXOR                , "ixor"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(LXOR                , "lxor"            , "b"    , 1, COMMUTATIVE | ASSOCIATIVE);
-        def(IINC                , "iinc"            , "bic"  , 1, LOAD | STORE);
-        def(I2L                 , "i2l"             , "b"    , 1);
-        def(I2F                 , "i2f"             , "b"    , 1);
-        def(I2D                 , "i2d"             , "b"    , 1);
-        def(L2I                 , "l2i"             , "b"    , 1);
-        def(L2F                 , "l2f"             , "b"    , 1);
-        def(L2D                 , "l2d"             , "b"    , 1);
-        def(F2I                 , "f2i"             , "b"    , 1);
-        def(F2L                 , "f2l"             , "b"    , 1);
-        def(F2D                 , "f2d"             , "b"    , 1);
-        def(D2I                 , "d2i"             , "b"    , 1);
-        def(D2L                 , "d2l"             , "b"    , 1);
-        def(D2F                 , "d2f"             , "b"    , 1);
-        def(I2B                 , "i2b"             , "b"    , 1);
-        def(I2C                 , "i2c"             , "b"    , 1);
-        def(I2S                 , "i2s"             , "b"    , 1);
-        def(LCMP                , "lcmp"            , "b"    , 1);
-        def(FCMPL               , "fcmpl"           , "b"    , 1);
-        def(FCMPG               , "fcmpg"           , "b"    , 1);
-        def(DCMPL               , "dcmpl"           , "b"    , 1);
-        def(DCMPG               , "dcmpg"           , "b"    , 1);
-        def(IFEQ                , "ifeq"            , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IFNE                , "ifne"            , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IFLT                , "iflt"            , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IFGE                , "ifge"            , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IFGT                , "ifgt"            , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IFLE                , "ifle"            , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IF_ICMPEQ           , "if_icmpeq"       , "boo"  , 2, COMMUTATIVE | FALL_THROUGH | BRANCH);
-        def(IF_ICMPNE           , "if_icmpne"       , "boo"  , 2, COMMUTATIVE | FALL_THROUGH | BRANCH);
-        def(IF_ICMPLT           , "if_icmplt"       , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IF_ICMPGE           , "if_icmpge"       , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IF_ICMPGT           , "if_icmpgt"       , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IF_ICMPLE           , "if_icmple"       , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IF_ACMPEQ           , "if_acmpeq"       , "boo"  , 2, COMMUTATIVE | FALL_THROUGH | BRANCH);
-        def(IF_ACMPNE           , "if_acmpne"       , "boo"  , 2, COMMUTATIVE | FALL_THROUGH | BRANCH);
-        def(GOTO                , "goto"            , "boo"  , 1, STOP | BRANCH);
-        def(JSR                 , "jsr"             , "boo"  , 0, STOP | BRANCH);
-        def(RET                 , "ret"             , "bi"   , 0, STOP);
-        def(TABLESWITCH         , "tableswitch"     , ""     , 4, STOP);
-        def(LOOKUPSWITCH        , "lookupswitch"    , ""     , 4, STOP);
-        def(IRETURN             , "ireturn"         , "b"    , 1, TRAP | STOP);
-        def(LRETURN             , "lreturn"         , "b"    , 1, TRAP | STOP);
-        def(FRETURN             , "freturn"         , "b"    , 1, TRAP | STOP);
-        def(DRETURN             , "dreturn"         , "b"    , 1, TRAP | STOP);
-        def(ARETURN             , "areturn"         , "b"    , 1, TRAP | STOP);
-        def(RETURN              , "return"          , "b"    , 1, TRAP | STOP);
-        def(GETSTATIC           , "getstatic"       , "bjj"  , 2, TRAP | FIELD_READ);
-        def(PUTSTATIC           , "putstatic"       , "bjj"  , 2, TRAP | FIELD_WRITE);
-        def(GETFIELD            , "getfield"        , "bjj"  , 2, TRAP | FIELD_READ);
-        def(PUTFIELD            , "putfield"        , "bjj"  , 2, TRAP | FIELD_WRITE);
-        def(INVOKEVIRTUAL       , "invokevirtual"   , "bjj"  , 7, TRAP | INVOKE);
-        def(INVOKESPECIAL       , "invokespecial"   , "bjj"  , 5, TRAP | INVOKE);
-        def(INVOKESTATIC        , "invokestatic"    , "bjj"  , 5, TRAP | INVOKE);
-        def(INVOKEINTERFACE     , "invokeinterface" , "bjja_", 7, TRAP | INVOKE);
-        def(INVOKEDYNAMIC       , "invokedynamic"   , "bjjjj", 7, TRAP | INVOKE);
-        def(NEW                 , "new"             , "bii"  , 6, TRAP);
-        def(NEWARRAY            , "newarray"        , "bc"   , 6, TRAP);
-        def(ANEWARRAY           , "anewarray"       , "bii"  , 6, TRAP);
-        def(ARRAYLENGTH         , "arraylength"     , "b"    , 2, TRAP);
-        def(ATHROW              , "athrow"          , "b"    , 5, TRAP | STOP);
-        def(CHECKCAST           , "checkcast"       , "bii"  , 3, TRAP);
-        def(INSTANCEOF          , "instanceof"      , "bii"  , 4, TRAP);
-        def(MONITORENTER        , "monitorenter"    , "b"    , 5, TRAP);
-        def(MONITOREXIT         , "monitorexit"     , "b"    , 5, TRAP);
-        def(WIDE                , "wide"            , ""     , 0);
-        def(MULTIANEWARRAY      , "multianewarray"  , "biic" , 6, TRAP);
-        def(IFNULL              , "ifnull"          , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(IFNONNULL           , "ifnonnull"       , "boo"  , 2, FALL_THROUGH | BRANCH);
-        def(GOTO_W              , "goto_w"          , "boooo", 1, STOP | BRANCH);
-        def(JSR_W               , "jsr_w"           , "boooo", 0, STOP | BRANCH);
-        def(BREAKPOINT          , "breakpoint"      , "b"    , 0, TRAP);
+        def(NOP                 , "nop"             , "b"    );
+        def(ACONST_NULL         , "aconst_null"     , "b"    );
+        def(ICONST_M1           , "iconst_m1"       , "b"    );
+        def(ICONST_0            , "iconst_0"        , "b"    );
+        def(ICONST_1            , "iconst_1"        , "b"    );
+        def(ICONST_2            , "iconst_2"        , "b"    );
+        def(ICONST_3            , "iconst_3"        , "b"    );
+        def(ICONST_4            , "iconst_4"        , "b"    );
+        def(ICONST_5            , "iconst_5"        , "b"    );
+        def(LCONST_0            , "lconst_0"        , "b"    );
+        def(LCONST_1            , "lconst_1"        , "b"    );
+        def(FCONST_0            , "fconst_0"        , "b"    );
+        def(FCONST_1            , "fconst_1"        , "b"    );
+        def(FCONST_2            , "fconst_2"        , "b"    );
+        def(DCONST_0            , "dconst_0"        , "b"    );
+        def(DCONST_1            , "dconst_1"        , "b"    );
+        def(BIPUSH              , "bipush"          , "bc"   );
+        def(SIPUSH              , "sipush"          , "bcc"  );
+        def(LDC                 , "ldc"             , "bi"   , TRAP);
+        def(LDC_W               , "ldc_w"           , "bii"  , TRAP);
+        def(LDC2_W              , "ldc2_w"          , "bii"  , TRAP);
+        def(ILOAD               , "iload"           , "bi"   , LOAD);
+        def(LLOAD               , "lload"           , "bi"   , LOAD);
+        def(FLOAD               , "fload"           , "bi"   , LOAD);
+        def(DLOAD               , "dload"           , "bi"   , LOAD);
+        def(ALOAD               , "aload"           , "bi"   , LOAD);
+        def(ILOAD_0             , "iload_0"         , "b"    , LOAD);
+        def(ILOAD_1             , "iload_1"         , "b"    , LOAD);
+        def(ILOAD_2             , "iload_2"         , "b"    , LOAD);
+        def(ILOAD_3             , "iload_3"         , "b"    , LOAD);
+        def(LLOAD_0             , "lload_0"         , "b"    , LOAD);
+        def(LLOAD_1             , "lload_1"         , "b"    , LOAD);
+        def(LLOAD_2             , "lload_2"         , "b"    , LOAD);
+        def(LLOAD_3             , "lload_3"         , "b"    , LOAD);
+        def(FLOAD_0             , "fload_0"         , "b"    , LOAD);
+        def(FLOAD_1             , "fload_1"         , "b"    , LOAD);
+        def(FLOAD_2             , "fload_2"         , "b"    , LOAD);
+        def(FLOAD_3             , "fload_3"         , "b"    , LOAD);
+        def(DLOAD_0             , "dload_0"         , "b"    , LOAD);
+        def(DLOAD_1             , "dload_1"         , "b"    , LOAD);
+        def(DLOAD_2             , "dload_2"         , "b"    , LOAD);
+        def(DLOAD_3             , "dload_3"         , "b"    , LOAD);
+        def(ALOAD_0             , "aload_0"         , "b"    , LOAD);
+        def(ALOAD_1             , "aload_1"         , "b"    , LOAD);
+        def(ALOAD_2             , "aload_2"         , "b"    , LOAD);
+        def(ALOAD_3             , "aload_3"         , "b"    , LOAD);
+        def(IALOAD              , "iaload"          , "b"    , TRAP);
+        def(LALOAD              , "laload"          , "b"    , TRAP);
+        def(FALOAD              , "faload"          , "b"    , TRAP);
+        def(DALOAD              , "daload"          , "b"    , TRAP);
+        def(AALOAD              , "aaload"          , "b"    , TRAP);
+        def(BALOAD              , "baload"          , "b"    , TRAP);
+        def(CALOAD              , "caload"          , "b"    , TRAP);
+        def(SALOAD              , "saload"          , "b"    , TRAP);
+        def(ISTORE              , "istore"          , "bi"   , STORE);
+        def(LSTORE              , "lstore"          , "bi"   , STORE);
+        def(FSTORE              , "fstore"          , "bi"   , STORE);
+        def(DSTORE              , "dstore"          , "bi"   , STORE);
+        def(ASTORE              , "astore"          , "bi"   , STORE);
+        def(ISTORE_0            , "istore_0"        , "b"    , STORE);
+        def(ISTORE_1            , "istore_1"        , "b"    , STORE);
+        def(ISTORE_2            , "istore_2"        , "b"    , STORE);
+        def(ISTORE_3            , "istore_3"        , "b"    , STORE);
+        def(LSTORE_0            , "lstore_0"        , "b"    , STORE);
+        def(LSTORE_1            , "lstore_1"        , "b"    , STORE);
+        def(LSTORE_2            , "lstore_2"        , "b"    , STORE);
+        def(LSTORE_3            , "lstore_3"        , "b"    , STORE);
+        def(FSTORE_0            , "fstore_0"        , "b"    , STORE);
+        def(FSTORE_1            , "fstore_1"        , "b"    , STORE);
+        def(FSTORE_2            , "fstore_2"        , "b"    , STORE);
+        def(FSTORE_3            , "fstore_3"        , "b"    , STORE);
+        def(DSTORE_0            , "dstore_0"        , "b"    , STORE);
+        def(DSTORE_1            , "dstore_1"        , "b"    , STORE);
+        def(DSTORE_2            , "dstore_2"        , "b"    , STORE);
+        def(DSTORE_3            , "dstore_3"        , "b"    , STORE);
+        def(ASTORE_0            , "astore_0"        , "b"    , STORE);
+        def(ASTORE_1            , "astore_1"        , "b"    , STORE);
+        def(ASTORE_2            , "astore_2"        , "b"    , STORE);
+        def(ASTORE_3            , "astore_3"        , "b"    , STORE);
+        def(IASTORE             , "iastore"         , "b"    , TRAP);
+        def(LASTORE             , "lastore"         , "b"    , TRAP);
+        def(FASTORE             , "fastore"         , "b"    , TRAP);
+        def(DASTORE             , "dastore"         , "b"    , TRAP);
+        def(AASTORE             , "aastore"         , "b"    , TRAP);
+        def(BASTORE             , "bastore"         , "b"    , TRAP);
+        def(CASTORE             , "castore"         , "b"    , TRAP);
+        def(SASTORE             , "sastore"         , "b"    , TRAP);
+        def(POP                 , "pop"             , "b"    );
+        def(POP2                , "pop2"            , "b"    );
+        def(DUP                 , "dup"             , "b"    );
+        def(DUP_X1              , "dup_x1"          , "b"    );
+        def(DUP_X2              , "dup_x2"          , "b"    );
+        def(DUP2                , "dup2"            , "b"    );
+        def(DUP2_X1             , "dup2_x1"         , "b"    );
+        def(DUP2_X2             , "dup2_x2"         , "b"    );
+        def(SWAP                , "swap"            , "b"    );
+        def(IADD                , "iadd"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(LADD                , "ladd"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(FADD                , "fadd"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(DADD                , "dadd"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(ISUB                , "isub"            , "b"    );
+        def(LSUB                , "lsub"            , "b"    );
+        def(FSUB                , "fsub"            , "b"    );
+        def(DSUB                , "dsub"            , "b"    );
+        def(IMUL                , "imul"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(LMUL                , "lmul"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(FMUL                , "fmul"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(DMUL                , "dmul"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(IDIV                , "idiv"            , "b"    , TRAP);
+        def(LDIV                , "ldiv"            , "b"    , TRAP);
+        def(FDIV                , "fdiv"            , "b"    );
+        def(DDIV                , "ddiv"            , "b"    );
+        def(IREM                , "irem"            , "b"    , TRAP);
+        def(LREM                , "lrem"            , "b"    , TRAP);
+        def(FREM                , "frem"            , "b"    );
+        def(DREM                , "drem"            , "b"    );
+        def(INEG                , "ineg"            , "b"    );
+        def(LNEG                , "lneg"            , "b"    );
+        def(FNEG                , "fneg"            , "b"    );
+        def(DNEG                , "dneg"            , "b"    );
+        def(ISHL                , "ishl"            , "b"    );
+        def(LSHL                , "lshl"            , "b"    );
+        def(ISHR                , "ishr"            , "b"    );
+        def(LSHR                , "lshr"            , "b"    );
+        def(IUSHR               , "iushr"           , "b"    );
+        def(LUSHR               , "lushr"           , "b"    );
+        def(IAND                , "iand"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(LAND                , "land"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(IOR                 , "ior"             , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(LOR                 , "lor"             , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(IXOR                , "ixor"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(LXOR                , "lxor"            , "b"    , COMMUTATIVE | ASSOCIATIVE);
+        def(IINC                , "iinc"            , "bic"  , LOAD | STORE);
+        def(I2L                 , "i2l"             , "b"    );
+        def(I2F                 , "i2f"             , "b"    );
+        def(I2D                 , "i2d"             , "b"    );
+        def(L2I                 , "l2i"             , "b"    );
+        def(L2F                 , "l2f"             , "b"    );
+        def(L2D                 , "l2d"             , "b"    );
+        def(F2I                 , "f2i"             , "b"    );
+        def(F2L                 , "f2l"             , "b"    );
+        def(F2D                 , "f2d"             , "b"    );
+        def(D2I                 , "d2i"             , "b"    );
+        def(D2L                 , "d2l"             , "b"    );
+        def(D2F                 , "d2f"             , "b"    );
+        def(I2B                 , "i2b"             , "b"    );
+        def(I2C                 , "i2c"             , "b"    );
+        def(I2S                 , "i2s"             , "b"    );
+        def(LCMP                , "lcmp"            , "b"    );
+        def(FCMPL               , "fcmpl"           , "b"    );
+        def(FCMPG               , "fcmpg"           , "b"    );
+        def(DCMPL               , "dcmpl"           , "b"    );
+        def(DCMPG               , "dcmpg"           , "b"    );
+        def(IFEQ                , "ifeq"            , "boo"  , FALL_THROUGH | BRANCH);
+        def(IFNE                , "ifne"            , "boo"  , FALL_THROUGH | BRANCH);
+        def(IFLT                , "iflt"            , "boo"  , FALL_THROUGH | BRANCH);
+        def(IFGE                , "ifge"            , "boo"  , FALL_THROUGH | BRANCH);
+        def(IFGT                , "ifgt"            , "boo"  , FALL_THROUGH | BRANCH);
+        def(IFLE                , "ifle"            , "boo"  , FALL_THROUGH | BRANCH);
+        def(IF_ICMPEQ           , "if_icmpeq"       , "boo"  , COMMUTATIVE | FALL_THROUGH | BRANCH);
+        def(IF_ICMPNE           , "if_icmpne"       , "boo"  , COMMUTATIVE | FALL_THROUGH | BRANCH);
+        def(IF_ICMPLT           , "if_icmplt"       , "boo"  , FALL_THROUGH | BRANCH);
+        def(IF_ICMPGE           , "if_icmpge"       , "boo"  , FALL_THROUGH | BRANCH);
+        def(IF_ICMPGT           , "if_icmpgt"       , "boo"  , FALL_THROUGH | BRANCH);
+        def(IF_ICMPLE           , "if_icmple"       , "boo"  , FALL_THROUGH | BRANCH);
+        def(IF_ACMPEQ           , "if_acmpeq"       , "boo"  , COMMUTATIVE | FALL_THROUGH | BRANCH);
+        def(IF_ACMPNE           , "if_acmpne"       , "boo"  , COMMUTATIVE | FALL_THROUGH | BRANCH);
+        def(GOTO                , "goto"            , "boo"  , STOP | BRANCH);
+        def(JSR                 , "jsr"             , "boo"  , STOP | BRANCH);
+        def(RET                 , "ret"             , "bi"   , STOP);
+        def(TABLESWITCH         , "tableswitch"     , ""     , STOP);
+        def(LOOKUPSWITCH        , "lookupswitch"    , ""     , STOP);
+        def(IRETURN             , "ireturn"         , "b"    , TRAP | STOP);
+        def(LRETURN             , "lreturn"         , "b"    , TRAP | STOP);
+        def(FRETURN             , "freturn"         , "b"    , TRAP | STOP);
+        def(DRETURN             , "dreturn"         , "b"    , TRAP | STOP);
+        def(ARETURN             , "areturn"         , "b"    , TRAP | STOP);
+        def(RETURN              , "return"          , "b"    , TRAP | STOP);
+        def(GETSTATIC           , "getstatic"       , "bjj"  , TRAP | FIELD_READ);
+        def(PUTSTATIC           , "putstatic"       , "bjj"  , TRAP | FIELD_WRITE);
+        def(GETFIELD            , "getfield"        , "bjj"  , TRAP | FIELD_READ);
+        def(PUTFIELD            , "putfield"        , "bjj"  , TRAP | FIELD_WRITE);
+        def(INVOKEVIRTUAL       , "invokevirtual"   , "bjj"  , TRAP | INVOKE);
+        def(INVOKESPECIAL       , "invokespecial"   , "bjj"  , TRAP | INVOKE);
+        def(INVOKESTATIC        , "invokestatic"    , "bjj"  , TRAP | INVOKE);
+        def(INVOKEINTERFACE     , "invokeinterface" , "bjja_", TRAP | INVOKE);
+        def(INVOKEDYNAMIC       , "invokedynamic"   , "bjjjj", TRAP | INVOKE);
+        def(NEW                 , "new"             , "bii"  , TRAP);
+        def(NEWARRAY            , "newarray"        , "bc"   , TRAP);
+        def(ANEWARRAY           , "anewarray"       , "bii"  , TRAP);
+        def(ARRAYLENGTH         , "arraylength"     , "b"    , TRAP);
+        def(ATHROW              , "athrow"          , "b"    , TRAP | STOP);
+        def(CHECKCAST           , "checkcast"       , "bii"  , TRAP);
+        def(INSTANCEOF          , "instanceof"      , "bii"  , TRAP);
+        def(MONITORENTER        , "monitorenter"    , "b"    , TRAP);
+        def(MONITOREXIT         , "monitorexit"     , "b"    , TRAP);
+        def(WIDE                , "wide"            , ""     );
+        def(MULTIANEWARRAY      , "multianewarray"  , "biic" , TRAP);
+        def(IFNULL              , "ifnull"          , "boo"  , FALL_THROUGH | BRANCH);
+        def(IFNONNULL           , "ifnonnull"       , "boo"  , FALL_THROUGH | BRANCH);
+        def(GOTO_W              , "goto_w"          , "boooo", STOP | BRANCH);
+        def(JSR_W               , "jsr_w"           , "boooo", STOP | BRANCH);
+        def(BREAKPOINT          , "breakpoint"      , "b"    , TRAP);
     }
     // @formatter:on
     // Checkstyle: resume
@@ -596,16 +590,6 @@
     }
 
     /**
-     * Gets the compilation complexity for a given opcode.
-     * 
-     * @param opcode an opcode
-     * @return a value >= 0
-     */
-    public static int compilationComplexity(int opcode) {
-        return compilationComplexityArray[opcode & 0xff];
-    }
-
-    /**
      * Gets the lower-case mnemonic for a given opcode.
      * 
      * @param opcode an opcode
@@ -809,8 +793,8 @@
      * @param name instruction name (should be lower case)
      * @param format encodes the length of the instruction
      */
-    private static void def(int opcode, String name, String format, int compilationComplexity) {
-        def(opcode, name, format, compilationComplexity, 0);
+    private static void def(int opcode, String name, String format) {
+        def(opcode, name, format, 0);
     }
 
     /**
@@ -820,12 +804,11 @@
      * @param format encodes the length of the instruction
      * @param flags the set of {@link Flags} associated with the instruction
      */
-    private static void def(int opcode, String name, String format, int compilationComplexity, int flags) {
+    private static void def(int opcode, String name, String format, int flags) {
         assert nameArray[opcode] == null : "opcode " + opcode + " is already bound to name " + nameArray[opcode];
         nameArray[opcode] = name;
         int instructionLength = format.length();
         lengthArray[opcode] = instructionLength;
-        compilationComplexityArray[opcode] = compilationComplexity;
         Bytecodes.flagsArray[opcode] = flags;
 
         assert !isConditionalBranch(opcode) || isBranch(opcode) : "a conditional branch must also be a branch";
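
The format strings above encode each instruction's length, one character per byte:
'b' stands for the opcode byte itself and every further character for one operand
byte (so "boo" is a branch with a two-byte offset, "boooo" a wide branch with a
four-byte offset, "bjj" an instruction with a two-byte constant-pool index). A
hedged usage sketch, assuming lengthOf and nameOf are the accessors backed by the
lengthArray and nameArray that def(...) fills in:

    // Sketch only: lengthOf/nameOf are assumed to be the lookups backed by the
    // arrays populated by def(...) above.
    int opcode = Bytecodes.GOTO_W;
    assert Bytecodes.lengthOf(opcode) == 5;           // "boooo": opcode + 4 offset bytes
    assert "goto_w".equals(Bytecodes.nameOf(opcode)); // the lower-case mnemonic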
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Wed May 15 14:53:34 2013 +0200
@@ -912,12 +912,12 @@
 
     @Override
     public void visitBreakpointNode(BreakpointNode node) {
-        JavaType[] sig = new JavaType[node.arguments.size()];
+        JavaType[] sig = new JavaType[node.arguments().size()];
         for (int i = 0; i < sig.length; i++) {
-            sig[i] = node.arguments.get(i).stamp().javaType(runtime);
+            sig[i] = node.arguments().get(i).stamp().javaType(runtime);
         }
 
-        Value[] parameters = visitInvokeArguments(frameMap.registerConfig.getCallingConvention(CallingConvention.Type.JavaCall, null, sig, target(), false), node.arguments);
+        Value[] parameters = visitInvokeArguments(frameMap.registerConfig.getCallingConvention(CallingConvention.Type.JavaCall, null, sig, target(), false), node.arguments());
         append(new AMD64BreakpointOp(parameters));
     }
 
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/BoxingEliminationTest.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/BoxingEliminationTest.java	Wed May 15 14:53:34 2013 +0200
@@ -116,6 +116,7 @@
         return constantBoxedShort();
     }
 
+    @Ignore
     @Test
     public void testLoop() {
         compareGraphs("testLoopSnippet", "referenceLoopSnippet", false, true);
@@ -341,6 +342,7 @@
                 new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(referenceGraph);
                 new DeadCodeEliminationPhase().apply(referenceGraph);
                 new CanonicalizerPhase().apply(referenceGraph, context);
+
                 assertEquals(referenceGraph, graph, excludeVirtual);
             }
         });
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java	Wed May 15 14:53:34 2013 +0200
@@ -149,7 +149,6 @@
         TypeProfileProxyNode.cleanFromGraph(graph);
 
         plan.runPhases(PhasePosition.HIGH_LEVEL, graph);
-
         Suites.DEFAULT.getHighTier().apply(graph, highTierContext);
 
         MidTierContext midTierContext = new MidTierContext(runtime, assumptions, replacements, target, optimisticOpts);
@@ -159,6 +158,7 @@
 
         LowTierContext lowTierContext = new LowTierContext(runtime, assumptions, replacements, target);
         Suites.DEFAULT.getLowTier().apply(graph, lowTierContext);
+        InliningPhase.storeStatisticsAfterLowTier(graph);
 
         final SchedulePhase schedule = new SchedulePhase();
         schedule.apply(graph);
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClass.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClass.java	Wed May 15 14:53:34 2013 +0200
@@ -190,9 +190,13 @@
             if (field.isAnnotationPresent(Node.Input.class)) {
                 assert !field.isAnnotationPresent(Node.Successor.class) : "field cannot be both input and successor";
                 if (INPUT_LIST_CLASS.isAssignableFrom(type)) {
+                    assert Modifier.isFinal(field.getModifiers()) : "NodeInputList input field " + field + " should be final";
+                    assert !Modifier.isPublic(field.getModifiers()) : "NodeInputList input field " + field + " should not be public";
                     inputListOffsets.add(offset);
                 } else {
                     assert NODE_CLASS.isAssignableFrom(type) || type.isInterface() : "invalid input type: " + type;
+                    assert !Modifier.isFinal(field.getModifiers()) : "Node input field " + field + " should not be final";
+                    assert Modifier.isPrivate(field.getModifiers()) : "Node input field " + field + " should be private";
                     inputOffsets.add(offset);
                 }
                 if (field.getAnnotation(Node.Input.class).notDataflow()) {
@@ -200,9 +204,13 @@
                 }
             } else if (field.isAnnotationPresent(Node.Successor.class)) {
                 if (SUCCESSOR_LIST_CLASS.isAssignableFrom(type)) {
+                    assert Modifier.isFinal(field.getModifiers()) : "NodeSuccessorList successor field " + field + " should be final";
+                    assert !Modifier.isPublic(field.getModifiers()) : "NodeSuccessorList successor field " + field + " should not be public";
                     successorListOffsets.add(offset);
                 } else {
                     assert NODE_CLASS.isAssignableFrom(type) : "invalid successor type: " + type;
+                    assert !Modifier.isFinal(field.getModifiers()) : "Node successor field " + field + " should not be final";
+                    assert Modifier.isPrivate(field.getModifiers()) : "Node successor field " + field + " should be private";
                     successorOffsets.add(offset);
                 }
             } else {
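
The assertions added above encode a field-shape convention for node classes:
list-valued inputs and successors (NodeInputList/NodeSuccessorList) must be final
and non-public, while singular Node-valued inputs and successors must be private
and non-final, since the graph rewires them. A minimal hedged sketch of a
conforming node, with accessors added where other code needs the values
(ExampleNode and its members are illustrative only):

    // Sketch only: a node obeying the asserted field conventions.
    public class ExampleNode extends FixedWithNextNode {

        @Input private ValueNode object;                    // singular: private, non-final
        @Input private final NodeInputList<ValueNode> args; // list: final, non-public

        public ExampleNode(ValueNode object, ValueNode... args) {
            super(StampFactory.forVoid());
            this.object = object;
            this.args = new NodeInputList<>(this, args);
        }

        public ValueNode object() {
            return object;
        }

        public NodeInputList<ValueNode> args() {
            return args;
        }
    }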
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/CompilationTask.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/CompilationTask.java	Wed May 15 14:53:34 2013 +0200
@@ -24,7 +24,6 @@
 
 import static com.oracle.graal.api.code.CodeUtil.*;
 import static com.oracle.graal.nodes.StructuredGraph.*;
-import static com.oracle.graal.phases.common.InliningUtil.*;
 
 import java.lang.reflect.Modifier;
 import java.util.concurrent.*;
@@ -40,6 +39,7 @@
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.phases.*;
+import com.oracle.graal.phases.common.*;
 
 public final class CompilationTask implements Runnable, Comparable<CompilationTask> {
 
@@ -158,7 +158,7 @@
                             // Compiling method substitution - must clone the graph
                             graph = graph.copy();
                         }
-                        InlinedBytecodes.add(method.getCodeSize());
+                        InliningUtil.InlinedBytecodes.add(method.getCodeSize());
                         HotSpotRuntime runtime = graalRuntime.getRuntime();
                         CallingConvention cc = getCallingConvention(runtime, Type.JavaCallee, graph.method(), false);
                         return GraalCompiler.compileGraph(graph, cc, method, runtime, replacements, graalRuntime.getBackend(), graalRuntime.getTarget(), graalRuntime.getCache(), plan, optimisticOpts,
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompilerImpl.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/VMToCompilerImpl.java	Wed May 15 14:53:34 2013 +0200
@@ -27,7 +27,6 @@
 import static com.oracle.graal.hotspot.CompilationTask.*;
 import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
 import static com.oracle.graal.java.GraphBuilderPhase.*;
-import static com.oracle.graal.phases.common.InliningUtil.*;
 
 import java.io.*;
 import java.lang.reflect.*;
@@ -49,6 +48,7 @@
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.PhasePlan.PhasePosition;
+import com.oracle.graal.phases.common.*;
 import com.oracle.graal.printer.*;
 import com.oracle.graal.replacements.*;
 
@@ -319,7 +319,7 @@
         CompilationStatistics.clear(phase);
         if (graalRuntime.getConfig().ciTime) {
             parsedBytecodesPerSecond = MetricRateInPhase.snapshot(phase, parsedBytecodesPerSecond, BytecodesParsed, CompilationTime, TimeUnit.SECONDS);
-            inlinedBytecodesPerSecond = MetricRateInPhase.snapshot(phase, inlinedBytecodesPerSecond, InlinedBytecodes, CompilationTime, TimeUnit.SECONDS);
+            inlinedBytecodesPerSecond = MetricRateInPhase.snapshot(phase, inlinedBytecodesPerSecond, InliningUtil.InlinedBytecodes, CompilationTime, TimeUnit.SECONDS);
         }
     }
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod.java	Wed May 15 14:53:34 2013 +0200
@@ -34,7 +34,6 @@
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.api.meta.ProfilingInfo.TriState;
-import com.oracle.graal.bytecode.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.debug.*;
@@ -64,7 +63,6 @@
     private Map<Object, Object> compilerStorage;
     private HotSpotMethodData methodData;
     private byte[] code;
-    private int compilationComplexity;
     private CompilationTask currentTask;
     private SpeculationLog speculationLog;
 
@@ -246,22 +244,6 @@
     }
 
     @Override
-    public int getCompilationComplexity() {
-        if (compilationComplexity <= 0 && getCodeSize() > 0) {
-            BytecodeStream s = new BytecodeStream(getCode());
-            int result = 0;
-            int currentBC;
-            while ((currentBC = s.currentBC()) != Bytecodes.END) {
-                result += Bytecodes.compilationComplexity(currentBC);
-                s.next();
-            }
-            assert result > 0;
-            compilationComplexity = result;
-        }
-        return compilationComplexity;
-    }
-
-    @Override
     public ProfilingInfo getProfilingInfo() {
         ProfilingInfo info;
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/CRuntimeCall.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/CRuntimeCall.java	Wed May 15 14:53:34 2013 +0200
@@ -37,7 +37,7 @@
  */
 public class CRuntimeCall extends DeoptimizingFixedWithNextNode implements LIRGenLowerable {
 
-    @Input protected final NodeInputList<ValueNode> arguments;
+    @Input private final NodeInputList<ValueNode> arguments;
 
     private final Descriptor descriptor;
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/HotSpotNmethodExecuteNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/HotSpotNmethodExecuteNode.java	Wed May 15 14:53:34 2013 +0200
@@ -38,7 +38,7 @@
 
 public class HotSpotNmethodExecuteNode extends AbstractCallNode implements Lowerable {
 
-    @Input private final ValueNode code;
+    @Input private ValueNode code;
     private final Class[] signature;
 
     public HotSpotNmethodExecuteNode(Kind kind, Class[] signature, ValueNode code, ValueNode arg1, ValueNode arg2, ValueNode arg3) {
@@ -92,7 +92,7 @@
         LoadFieldNode loadmetaspaceMethod = graph().add(new LoadFieldNode(loadMethod, metaspaceMethodField));
 
         HotSpotIndirectCallTargetNode callTarget = graph().add(
-                        new HotSpotIndirectCallTargetNode(loadmetaspaceMethod, load, arguments, stamp(), signatureTypes, method, CallingConvention.Type.JavaCall));
+                        new HotSpotIndirectCallTargetNode(loadmetaspaceMethod, load, arguments(), stamp(), signatureTypes, method, CallingConvention.Type.JavaCall));
 
         InvokeNode invoke = graph().add(new InvokeNode(callTarget, 0));
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorEnterStubCall.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorEnterStubCall.java	Wed May 15 14:53:34 2013 +0200
@@ -35,8 +35,8 @@
  */
 public class MonitorEnterStubCall extends DeoptimizingStubCall implements LIRGenLowerable {
 
-    @Input private final ValueNode object;
-    @Input private final ValueNode lock;
+    @Input private ValueNode object;
+    @Input private ValueNode lock;
     public static final Descriptor MONITORENTER = new Descriptor("monitorenter", true, void.class, Object.class, Word.class);
 
     public MonitorEnterStubCall(ValueNode object, ValueNode lock) {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorExitStubCall.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorExitStubCall.java	Wed May 15 14:53:34 2013 +0200
@@ -36,7 +36,7 @@
  */
 public class MonitorExitStubCall extends DeoptimizingStubCall implements LIRGenLowerable {
 
-    @Input private final ValueNode object;
+    @Input private ValueNode object;
     private int lockDepth;
     public static final Descriptor MONITOREXIT = new Descriptor("monitorexit", true, void.class, Object.class, Word.class);
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/NewArrayStubCall.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/NewArrayStubCall.java	Wed May 15 14:53:34 2013 +0200
@@ -40,8 +40,8 @@
 
     private static final Stamp defaultStamp = StampFactory.objectNonNull();
 
-    @Input private final ValueNode hub;
-    @Input private final ValueNode length;
+    @Input private ValueNode hub;
+    @Input private ValueNode length;
 
     public static final Descriptor NEW_ARRAY = new Descriptor("new_array", false, Object.class, Word.class, int.class);
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/NewInstanceStubCall.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/NewInstanceStubCall.java	Wed May 15 14:53:34 2013 +0200
@@ -40,7 +40,7 @@
 
     private static final Stamp defaultStamp = StampFactory.objectNonNull();
 
-    @Input private final ValueNode hub;
+    @Input private ValueNode hub;
 
     public static final Descriptor NEW_INSTANCE = new Descriptor("new_instance", false, Object.class, Word.class);
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/NewMultiArrayStubCall.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/NewMultiArrayStubCall.java	Wed May 15 14:53:34 2013 +0200
@@ -41,8 +41,8 @@
 
     private static final Stamp defaultStamp = StampFactory.objectNonNull();
 
-    @Input private final ValueNode hub;
-    @Input private final ValueNode dims;
+    @Input private ValueNode hub;
+    @Input private ValueNode dims;
     private final int rank;
 
     public static final Descriptor NEW_MULTI_ARRAY = new Descriptor("new_multi_array", false, Object.class, Word.class, int.class, Word.class);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/TailcallNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/TailcallNode.java	Wed May 15 14:53:34 2013 +0200
@@ -42,8 +42,8 @@
  */
 public class TailcallNode extends FixedWithNextNode implements LIRLowerable {
 
-    @Input private final FrameState frameState;
-    @Input private final ValueNode target;
+    @Input private FrameState frameState;
+    @Input private ValueNode target;
 
     /**
      * Creates a TailcallNode.
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/ThreadIsInterruptedStubCall.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/ThreadIsInterruptedStubCall.java	Wed May 15 14:53:34 2013 +0200
@@ -40,8 +40,8 @@
  */
 public class ThreadIsInterruptedStubCall extends DeoptimizingStubCall implements LIRGenLowerable {
 
-    @Input private final ValueNode thread;
-    @Input private final ValueNode clearIsInterrupted;
+    @Input private ValueNode thread;
+    @Input private ValueNode clearIsInterrupted;
     public static final Descriptor THREAD_IS_INTERRUPTED = new Descriptor("thread_is_interrupted", false, boolean.class, Object.class, boolean.class);
 
     public ThreadIsInterruptedStubCall(ValueNode thread, ValueNode clearIsInterrupted) {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/VerifyOopStubCall.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/VerifyOopStubCall.java	Wed May 15 14:53:34 2013 +0200
@@ -35,7 +35,7 @@
  */
 public class VerifyOopStubCall extends DeoptimizingStubCall implements LIRGenLowerable {
 
-    @Input private final ValueNode object;
+    @Input private ValueNode object;
     public static final Descriptor VERIFY_OOP = new Descriptor("verify_oop", false, Object.class, Object.class);
 
     public VerifyOopStubCall(ValueNode object) {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/AESCryptSubstitutions.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/AESCryptSubstitutions.java	Wed May 15 14:53:34 2013 +0200
@@ -81,9 +81,9 @@
 
     abstract static class CryptBlockStubCall extends DeoptimizingStubCall implements LIRGenLowerable {
 
-        @Input private final ValueNode in;
-        @Input private final ValueNode out;
-        @Input private final ValueNode key;
+        @Input private ValueNode in;
+        @Input private ValueNode out;
+        @Input private ValueNode key;
 
         private final Descriptor descriptor;
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/AbstractMethodHandleNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/AbstractMethodHandleNode.java	Wed May 15 14:53:34 2013 +0200
@@ -60,7 +60,7 @@
     // Replacement method data
     private ResolvedJavaMethod replacementTargetMethod;
     private JavaType replacementReturnType;
-    @Input private NodeInputList<ValueNode> replacementArguments;
+    @Input private final NodeInputList<ValueNode> replacementArguments;
 
     /**
      * Search for an instance field with the given name in a class.
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/CipherBlockChainingSubstitutions.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/CipherBlockChainingSubstitutions.java	Wed May 15 14:53:34 2013 +0200
@@ -98,11 +98,11 @@
 
     abstract static class AESCryptStubCall extends DeoptimizingStubCall implements LIRGenLowerable {
 
-        @Input private final ValueNode in;
-        @Input private final ValueNode out;
-        @Input private final ValueNode key;
-        @Input private final ValueNode r;
-        @Input private final ValueNode inLength;
+        @Input private ValueNode in;
+        @Input private ValueNode out;
+        @Input private ValueNode key;
+        @Input private ValueNode r;
+        @Input private ValueNode inLength;
 
         private final Descriptor descriptor;
 
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCLIRInstruction.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCLIRInstruction.java	Wed May 15 14:53:34 2013 +0200
@@ -27,7 +27,7 @@
 import com.oracle.graal.lir.asm.*;
 
 /**
- * Convenience class to provide AMD64MacroAssembler for the {@link #emitCode} method.
+ * Convenience class that provides a SPARCAssembler for the {@link #emitCode} method.
  */
 public abstract class SPARCLIRInstruction extends LIRInstruction {
 
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/FrameMap.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/FrameMap.java	Wed May 15 14:53:34 2013 +0200
@@ -96,7 +96,8 @@
 
     /**
      * Size of the area occupied by outgoing overflow arguments. This value is adjusted as calling
-     * conventions for outgoing calls are retrieved. On some platforms, there is a minimum
+     * conventions for outgoing calls are retrieved. On some platforms, there is a minimum
+     * outgoing size even if no overflow arguments are passed on the stack.
      */
     private int outgoingSize;
 
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/BreakpointNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/BreakpointNode.java	Wed May 15 14:53:34 2013 +0200
@@ -46,7 +46,7 @@
  */
 public final class BreakpointNode extends FixedWithNextNode implements LIRLowerable {
 
-    @Input public final NodeInputList<ValueNode> arguments;
+    @Input private final NodeInputList<ValueNode> arguments;
 
     public BreakpointNode(ValueNode... arguments) {
         super(StampFactory.forVoid());
@@ -57,4 +57,8 @@
     public void generate(LIRGeneratorTool gen) {
         gen.visitBreakpointNode(this);
     }
+
+    public NodeInputList<ValueNode> arguments() {
+        return arguments;
+    }
 }
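
The arguments() accessor above is what call sites such as
AMD64LIRGenerator.visitBreakpointNode (earlier in this changeset) now use in place
of the formerly public field. A hedged caller sketch (printBreakpointArguments is
an illustrative name only):

    // Sketch only: read a breakpoint's inputs through the new accessor.
    void printBreakpointArguments(BreakpointNode node) {
        for (ValueNode argument : node.arguments()) {
            System.out.println(argument);
        }
    }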
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/CallTargetNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/CallTargetNode.java	Wed May 15 14:53:34 2013 +0200
@@ -30,7 +30,7 @@
 
 public abstract class CallTargetNode extends ValueNode implements LIRLowerable {
 
-    @Input protected final NodeInputList<ValueNode> arguments;
+    @Input private final NodeInputList<ValueNode> arguments;
 
     public CallTargetNode(ValueNode[] arguments) {
         super(StampFactory.extension());
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/IndirectCallTargetNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/IndirectCallTargetNode.java	Wed May 15 14:53:34 2013 +0200
@@ -30,7 +30,7 @@
 
 public class IndirectCallTargetNode extends LoweredCallTargetNode {
 
-    @Input protected ValueNode computedAddress;
+    @Input private ValueNode computedAddress;
 
     public IndirectCallTargetNode(ValueNode computedAddress, List<ValueNode> arguments, Stamp returnStamp, JavaType[] signature, ResolvedJavaMethod target, CallingConvention.Type callType) {
         super(arguments, returnStamp, signature, target, callType);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/InlineableElement.java	Wed May 15 14:53:34 2013 +0200
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.nodes;
+
+public interface InlineableElement {
+
+    int getNodeCount();
+
+    Iterable<Invoke> getInvokes();
+}
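
Two implementations of this interface appear in the changeset: StructuredGraph
(below) and InliningUtil.InlineableMacroNode, which lets the inliner treat parsed
graphs and macro-node replacements uniformly. A hedged sketch of such uniform
handling (fitsBudget and maxNodes are illustrative names only):

    // Sketch only: a size screen that works for any InlineableElement.
    static boolean fitsBudget(InlineableElement elem, int maxNodes) {
        return elem.getNodeCount() <= maxNodes;
    }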
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/InvokeNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/InvokeNode.java	Wed May 15 14:53:34 2013 +0200
@@ -37,7 +37,7 @@
 @NodeInfo(nameTemplate = "Invoke#{p#targetMethod/s}")
 public final class InvokeNode extends AbstractStateSplit implements StateSplit, Node.IterableNodeType, Invoke, LIRLowerable, MemoryCheckpoint {
 
-    @Input private final CallTargetNode callTarget;
+    @Input private CallTargetNode callTarget;
     @Input private FrameState deoptState;
     @Input private GuardingNode guard;
     private final int bci;
@@ -85,7 +85,9 @@
     @Override
     public Map<Object, Object> getDebugProperties(Map<Object, Object> map) {
         Map<Object, Object> debugProperties = super.getDebugProperties(map);
-        debugProperties.put("targetMethod", callTarget.targetName());
+        if (callTarget != null) {
+            debugProperties.put("targetMethod", callTarget.targetName());
+        }
         return debugProperties;
     }
 
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/InvokeWithExceptionNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/InvokeWithExceptionNode.java	Wed May 15 14:53:34 2013 +0200
@@ -37,7 +37,7 @@
 
     @Successor private AbstractBeginNode next;
     @Successor private DispatchBeginNode exceptionEdge;
-    @Input private final CallTargetNode callTarget;
+    @Input private CallTargetNode callTarget;
     @Input private FrameState deoptState;
     @Input private FrameState stateAfter;
     @Input private GuardingNode guard;
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/StructuredGraph.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/StructuredGraph.java	Wed May 15 14:53:34 2013 +0200
@@ -35,7 +35,7 @@
  * A graph that contains at least one distinguished node : the {@link #start() start} node. This
  * node is the start of the control flow of the graph.
  */
-public class StructuredGraph extends Graph {
+public class StructuredGraph extends Graph implements InlineableElement {
 
     public static final int INVOCATION_ENTRY_BCI = -1;
     public static final long INVALID_GRAPH_ID = -1;
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/AbstractCallNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/AbstractCallNode.java	Wed May 15 14:53:34 2013 +0200
@@ -28,7 +28,7 @@
 
 public abstract class AbstractCallNode extends AbstractStateSplit implements StateSplit, MemoryCheckpoint {
 
-    @Input protected final NodeInputList<ValueNode> arguments;
+    @Input private final NodeInputList<ValueNode> arguments;
 
     public AbstractCallNode(Stamp stamp, ValueNode[] arguments) {
         super(stamp);
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/NullCheckNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/NullCheckNode.java	Wed May 15 14:53:34 2013 +0200
@@ -29,7 +29,7 @@
 
 public class NullCheckNode extends DeoptimizingFixedWithNextNode implements LIRLowerable, GuardingNode {
 
-    @Input public ValueNode object;
+    @Input private ValueNode object;
 
     public NullCheckNode(ValueNode object) {
         super(StampFactory.dependency());
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/ReadNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/ReadNode.java	Wed May 15 14:53:34 2013 +0200
@@ -72,6 +72,10 @@
 
     public static ValueNode canonicalizeRead(ValueNode read, LocationNode location, ValueNode object, CanonicalizerTool tool) {
         MetaAccessProvider runtime = tool.runtime();
+        if (read.usages().count() == 0) {
+            // A read without usages can be safely removed.
+            return null;
+        }
         if (runtime != null && object != null && object.isConstant()) {
             if (location.getLocationIdentity() == LocationNode.FINAL_LOCATION && location instanceof ConstantLocationNode) {
                 long displacement = ((ConstantLocationNode) location).getDisplacement();
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/SwitchNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/SwitchNode.java	Wed May 15 14:53:34 2013 +0200
@@ -34,7 +34,7 @@
  */
 public abstract class SwitchNode extends ControlSplitNode {
 
-    @Successor protected final NodeSuccessorList<AbstractBeginNode> successors;
+    @Successor private final NodeSuccessorList<AbstractBeginNode> successors;
     @Input private ValueNode value;
     private double[] keyProbabilities;
     private int[] keySuccessors;
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/ValueAnchorNode.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/ValueAnchorNode.java	Wed May 15 14:53:34 2013 +0200
@@ -36,7 +36,7 @@
  */
 public final class ValueAnchorNode extends FixedWithNextNode implements Canonicalizable, LIRLowerable, Node.IterableNodeType, Virtualizable, GuardingNode {
 
-    @Input private NodeInputList<ValueNode> anchored;
+    @Input private final NodeInputList<ValueNode> anchored;
 
     public ValueAnchorNode(ValueNode... values) {
         this(false, values);
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningPhase.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningPhase.java	Wed May 15 14:53:34 2013 +0200
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.phases.common;
 
-import java.lang.reflect.*;
 import java.util.*;
 import java.util.concurrent.*;
 
@@ -31,38 +30,29 @@
 import com.oracle.graal.debug.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.java.*;
 import com.oracle.graal.nodes.spi.*;
+import com.oracle.graal.nodes.type.*;
 import com.oracle.graal.nodes.util.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.PhasePlan.PhasePosition;
 import com.oracle.graal.phases.common.CanonicalizerPhase.CustomCanonicalizer;
 import com.oracle.graal.phases.common.InliningUtil.InlineInfo;
-import com.oracle.graal.phases.common.InliningUtil.InliningCallback;
+import com.oracle.graal.phases.common.InliningUtil.InlineableMacroNode;
 import com.oracle.graal.phases.common.InliningUtil.InliningPolicy;
 import com.oracle.graal.phases.graph.*;
 
-public class InliningPhase extends Phase implements InliningCallback {
-
-    /*
-     * - Detect method which only call another method with some parameters set to constants: void
-     * foo(a) -> void foo(a, b) -> void foo(a, b, c) ... These should not be taken into account when
-     * determining inlining depth. - honor the result of overrideInliningDecision(0, caller,
-     * invoke.bci, method, true);
-     */
+public class InliningPhase extends Phase {
 
     private final PhasePlan plan;
-
     private final MetaAccessProvider runtime;
-    private final Assumptions assumptions;
+    private final Assumptions compilationAssumptions;
     private final Replacements replacements;
     private final GraphCache cache;
     private final InliningPolicy inliningPolicy;
     private final OptimisticOptimizations optimisticOpts;
+
     private CustomCanonicalizer customCanonicalizer;
-
     private int inliningCount;
-
     private int maxMethodPerInlining = Integer.MAX_VALUE;
 
     // Metrics
@@ -73,17 +63,17 @@
 
     public InliningPhase(MetaAccessProvider runtime, Map<Invoke, Double> hints, Replacements replacements, Assumptions assumptions, GraphCache cache, PhasePlan plan,
                     OptimisticOptimizations optimisticOpts) {
-        this(runtime, replacements, assumptions, cache, plan, createInliningPolicy(runtime, replacements, assumptions, optimisticOpts, hints), optimisticOpts);
+        this(runtime, replacements, assumptions, cache, plan, optimisticOpts, hints);
     }
 
-    public InliningPhase(MetaAccessProvider runtime, Replacements replacements, Assumptions assumptions, GraphCache cache, PhasePlan plan, InliningPolicy inliningPolicy,
-                    OptimisticOptimizations optimisticOpts) {
+    private InliningPhase(MetaAccessProvider runtime, Replacements replacements, Assumptions assumptions, GraphCache cache, PhasePlan plan, OptimisticOptimizations optimisticOpts,
+                    Map<Invoke, Double> hints) {
         this.runtime = runtime;
         this.replacements = replacements;
-        this.assumptions = assumptions;
+        this.compilationAssumptions = assumptions;
         this.cache = cache;
         this.plan = plan;
-        this.inliningPolicy = inliningPolicy;
+        this.inliningPolicy = new GreedyInliningPolicy(replacements, hints);
         this.optimisticOpts = optimisticOpts;
     }
 
@@ -99,261 +89,255 @@
         return inliningCount;
     }
 
+    public static void storeStatisticsAfterLowTier(StructuredGraph graph) {
+        ResolvedJavaMethod method = graph.method();
+        if (method != null) {
+            CompiledMethodInfo info = compiledMethodInfo(method);
+            info.setLowLevelNodeCount(graph.getNodeCount());
+        }
+    }
+
     @Override
     protected void run(final StructuredGraph graph) {
-        NodesToDoubles nodeProbabilities = new ComputeProbabilityClosure(graph).apply();
-        NodesToDoubles nodeRelevance = new ComputeInliningRelevanceClosure(graph, nodeProbabilities).apply();
-        inliningPolicy.initialize(graph);
+        InliningData data = new InliningData();
+        data.pushGraph(graph, 1.0, 1.0);
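+        // The loop below walks invokes depth-first: graphs are pushed as their
+        // invokes are explored, and once every graph belonging to an invocation has
+        // been processed, the invocation is popped and tryToInline decides whether
+        // to inline it into the parent graph.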
 
-        while (inliningPolicy.continueInlining(graph)) {
-            final InlineInfo candidate = inliningPolicy.next();
-
-            if (candidate != null) {
-                boolean isWorthInlining = inliningPolicy.isWorthInlining(candidate, nodeProbabilities, nodeRelevance);
-                isWorthInlining &= candidate.numberOfMethods() <= maxMethodPerInlining;
+        while (data.hasUnprocessedGraphs()) {
+            GraphInfo graphInfo = data.currentGraph();
+            if (graphInfo.hasRemainingInvokes() && inliningPolicy.continueInlining(data)) {
+                processNextInvoke(data, graphInfo);
+            } else {
+                data.popGraph();
+                MethodInvocation currentInvocation = data.currentInvocation();
+                if (currentInvocation != null) {
+                    assert currentInvocation.callee().invoke().asNode().isAlive();
+                    currentInvocation.incrementProcessedGraphs();
+                    if (currentInvocation.processedAllGraphs()) {
+                        data.popInvocation();
+                        MethodInvocation parentInvoke = data.currentInvocation();
+                        tryToInline(data.currentGraph(), currentInvocation, parentInvoke);
+                    }
+                }
+            }
+        }
+    }
 
-                metricInliningConsidered.increment();
-                if (isWorthInlining) {
-                    int mark = graph.getMark();
-                    try {
-                        List<Node> invokeUsages = candidate.invoke().asNode().usages().snapshot();
-                        candidate.inline(graph, runtime, replacements, this, assumptions);
-                        Debug.dump(graph, "after %s", candidate);
-                        Iterable<Node> newNodes = graph.getNewNodes(mark);
-                        inliningPolicy.scanInvokes(newNodes);
-                        if (GraalOptions.OptCanonicalizer) {
-                            new CanonicalizerPhase.Instance(runtime, assumptions, invokeUsages, mark, customCanonicalizer).apply(graph);
-                        }
+    /**
+     * Process the next invoke and enqueue all its graphs for processing.
+     */
+    private void processNextInvoke(InliningData data, GraphInfo graphInfo) {
+        Invoke invoke = graphInfo.popInvoke();
+        MethodInvocation callerInvocation = data.currentInvocation();
+        Assumptions parentAssumptions = callerInvocation == null ? compilationAssumptions : callerInvocation.assumptions();
+        InlineInfo info = InliningUtil.getInlineInfo(data, invoke, maxMethodPerInlining, replacements, parentAssumptions, optimisticOpts);
 
-                        nodeProbabilities = new ComputeProbabilityClosure(graph).apply();
-                        nodeRelevance = new ComputeInliningRelevanceClosure(graph, nodeProbabilities).apply();
+        double invokeProbability = graphInfo.invokeProbability(invoke);
+        double invokeRelevance = graphInfo.invokeRelevance(invoke);
+        if (info != null && inliningPolicy.isWorthInlining(info, invokeProbability, invokeRelevance, false)) {
+            MethodInvocation calleeInvocation = data.pushInvocation(info, parentAssumptions, invokeProbability, invokeRelevance);
 
-                        inliningCount++;
-                        metricInliningPerformed.increment();
-                    } catch (BailoutException bailout) {
-                        throw bailout;
-                    } catch (AssertionError e) {
-                        throw new GraalInternalError(e).addContext(candidate.toString());
-                    } catch (RuntimeException e) {
-                        throw new GraalInternalError(e).addContext(candidate.toString());
-                    } catch (GraalInternalError e) {
-                        throw e.addContext(candidate.toString());
-                    }
-                } else if (optimisticOpts.devirtualizeInvokes()) {
-                    candidate.tryToDevirtualizeInvoke(graph, runtime, assumptions);
+            for (int i = 0; i < info.numberOfMethods(); i++) {
+                InlineableElement elem = getInlineableElement(info.methodAt(i), info.invoke(), calleeInvocation.assumptions());
+                info.setInlinableElement(i, elem);
+                if (elem instanceof StructuredGraph) {
+                    data.pushGraph((StructuredGraph) elem, invokeProbability * info.probabilityAt(i), invokeRelevance * info.relevanceAt(i));
+                } else {
+                    assert elem instanceof InlineableMacroNode;
+                    data.pushDummyGraph();
                 }
             }
         }
     }
 
-    @Override
-    public StructuredGraph buildGraph(final ResolvedJavaMethod method) {
-        metricInliningRuns.increment();
+    private void tryToInline(GraphInfo callerGraphInfo, MethodInvocation calleeInfo, MethodInvocation parentInvocation) {
+        InlineInfo callee = calleeInfo.callee();
+        Assumptions callerAssumptions = parentInvocation == null ? compilationAssumptions : parentInvocation.assumptions();
+
+        if (inliningPolicy.isWorthInlining(callee, calleeInfo.probability(), calleeInfo.relevance(), true)) {
+            doInline(callerGraphInfo, calleeInfo, callerAssumptions);
+        } else if (optimisticOpts.devirtualizeInvokes()) {
+            callee.tryToDevirtualizeInvoke(runtime, callerAssumptions);
+        }
+        metricInliningConsidered.increment();
+    }
+
+    private void doInline(GraphInfo callerGraphInfo, MethodInvocation calleeInfo, Assumptions callerAssumptions) {
+        StructuredGraph callerGraph = callerGraphInfo.graph();
+        int markBeforeInlining = callerGraph.getMark();
+        InlineInfo callee = calleeInfo.callee();
+        try {
+            List<Node> invokeUsages = callee.invoke().asNode().usages().snapshot();
+            callee.inline(runtime, callerAssumptions, replacements);
+            callerAssumptions.record(calleeInfo.assumptions());
+            metricInliningRuns.increment();
+            Debug.dump(callerGraph, "after %s", callee);
+
+            if (GraalOptions.OptCanonicalizer) {
+                int markBeforeCanonicalization = callerGraph.getMark();
+                new CanonicalizerPhase.Instance(runtime, callerAssumptions, invokeUsages, markBeforeInlining, customCanonicalizer).apply(callerGraph);
+
+                // process invokes that are possibly created during canonicalization
+                for (Node newNode : callerGraph.getNewNodes(markBeforeCanonicalization)) {
+                    if (newNode instanceof Invoke) {
+                        callerGraphInfo.pushInvoke((Invoke) newNode);
+                    }
+                }
+            }
+
+            callerGraphInfo.computeProbabilities();
+
+            inliningCount++;
+            metricInliningPerformed.increment();
+        } catch (BailoutException bailout) {
+            throw bailout;
+        } catch (AssertionError | RuntimeException e) {
+            throw new GraalInternalError(e).addContext(callee.toString());
+        } catch (GraalInternalError e) {
+            throw e.addContext(callee.toString());
+        }
+    }
+
+    private InlineableElement getInlineableElement(final ResolvedJavaMethod method, Invoke invoke, Assumptions assumptions) {
+        Class<? extends FixedWithNextNode> macroNodeClass = InliningUtil.getMacroNodeClass(replacements, method);
+        if (macroNodeClass != null) {
+            return new InlineableMacroNode(macroNodeClass);
+        } else {
+            return buildGraph(method, invoke, assumptions);
+        }
+    }
+
+    private StructuredGraph buildGraph(final ResolvedJavaMethod method, final Invoke invoke, final Assumptions assumptions) {
+        final StructuredGraph newGraph;
+        final boolean parseBytecodes;
+
+        // TODO (chaeubl): copying the graph is only necessary if it is modified or if it contains
+        // any invokes
+        StructuredGraph intrinsicGraph = InliningUtil.getIntrinsicGraph(replacements, method);
+        if (intrinsicGraph != null) {
+            newGraph = intrinsicGraph.copy();
+            parseBytecodes = false;
+        } else {
+            StructuredGraph cachedGraph = getCachedGraph(method);
+            if (cachedGraph != null) {
+                newGraph = cachedGraph.copy();
+                parseBytecodes = false;
+            } else {
+                newGraph = new StructuredGraph(method);
+                parseBytecodes = true;
+            }
+        }
+
+        return Debug.scope("InlineGraph", newGraph, new Callable<StructuredGraph>() {
+
+            @Override
+            public StructuredGraph call() throws Exception {
+                if (parseBytecodes) {
+                    parseBytecodes(newGraph, assumptions);
+                }
+
+                boolean callerHasMoreInformationAboutArguments = false;
+                NodeInputList<ValueNode> args = invoke.callTarget().arguments();
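+                // Specialize the callee graph with caller knowledge: a constant
+                // argument replaces the corresponding LocalNode, and a more precise
+                // caller stamp narrows the local's stamp via join.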
+                for (LocalNode localNode : newGraph.getNodes(LocalNode.class).snapshot()) {
+                    ValueNode arg = args.get(localNode.index());
+                    if (arg.isConstant()) {
+                        Constant constant = arg.asConstant();
+                        newGraph.replaceFloating(localNode, ConstantNode.forConstant(constant, runtime, newGraph));
+                        callerHasMoreInformationAboutArguments = true;
+                    } else {
+                        Stamp joinedStamp = localNode.stamp().join(arg.stamp());
+                        if (!joinedStamp.equals(localNode.stamp())) {
+                            localNode.setStamp(joinedStamp);
+                            callerHasMoreInformationAboutArguments = true;
+                        }
+                    }
+                }
+
+                if (!callerHasMoreInformationAboutArguments) {
+                    // TODO (chaeubl): if args are not more concrete, inlining should be avoided
+                    // in most cases or we could at least use the previous graph size + invoke
+                    // probability to check the inlining
+                }
+
+                if (GraalOptions.OptCanonicalizer) {
+                    new CanonicalizerPhase.Instance(runtime, assumptions).apply(newGraph);
+                }
+
+                return newGraph;
+            }
+        });
+    }
+
+    private StructuredGraph getCachedGraph(ResolvedJavaMethod method) {
         if (GraalOptions.CacheGraphs && cache != null) {
             StructuredGraph cachedGraph = cache.get(method);
             if (cachedGraph != null) {
                 return cachedGraph;
             }
         }
-        final StructuredGraph newGraph = new StructuredGraph(method);
-        return Debug.scope("InlineGraph", newGraph, new Callable<StructuredGraph>() {
-
-            @Override
-            public StructuredGraph call() throws Exception {
-                if (plan != null) {
-                    plan.runPhases(PhasePosition.AFTER_PARSING, newGraph);
-                }
-                assert newGraph.start().next() != null : "graph needs to be populated during PhasePosition.AFTER_PARSING";
-
-                new DeadCodeEliminationPhase().apply(newGraph);
-
-                if (GraalOptions.OptCanonicalizer) {
-                    new CanonicalizerPhase.Instance(runtime, assumptions).apply(newGraph);
-                }
-                if (GraalOptions.CullFrameStates) {
-                    new CullFrameStatesPhase().apply(newGraph);
-                }
-                if (GraalOptions.CacheGraphs && cache != null) {
-                    cache.put(newGraph);
-                }
-                return newGraph;
-            }
-        });
+        return null;
     }
 
-    private interface InliningDecision {
+    private StructuredGraph parseBytecodes(StructuredGraph newGraph, Assumptions assumptions) {
+        if (plan != null) {
+            plan.runPhases(PhasePosition.AFTER_PARSING, newGraph);
+        }
+        assert newGraph.start().next() != null : "graph needs to be populated during PhasePosition.AFTER_PARSING";
+
+        new DeadCodeEliminationPhase().apply(newGraph);
 
-        boolean isWorthInlining(InlineInfo info, NodesToDoubles nodeProbabilities, NodesToDoubles nodeRelevance);
+        if (GraalOptions.OptCanonicalizer) {
+            new CanonicalizerPhase.Instance(runtime, assumptions).apply(newGraph);
+        }
+
+        if (GraalOptions.CullFrameStates) {
+            new CullFrameStatesPhase().apply(newGraph);
+        }
+        if (GraalOptions.CacheGraphs && cache != null) {
+            cache.put(newGraph.copy());
+        }
+        return newGraph;
     }
 
-    private static class GreedySizeBasedInliningDecision implements InliningDecision {
+    private static synchronized CompiledMethodInfo compiledMethodInfo(ResolvedJavaMethod m) {
+        CompiledMethodInfo info = (CompiledMethodInfo) m.getCompilerStorage().get(CompiledMethodInfo.class);
+        if (info == null) {
+            info = new CompiledMethodInfo();
+            m.getCompilerStorage().put(CompiledMethodInfo.class, info);
+        }
+        return info;
+    }
 
-        private final MetaAccessProvider runtime;
-        private final Replacements replacements;
-        private final Map<Invoke, Double> hints;
+    private abstract static class AbstractInliningPolicy implements InliningPolicy {
 
-        public GreedySizeBasedInliningDecision(MetaAccessProvider runtime, Replacements replacements, Map<Invoke, Double> hints) {
-            this.runtime = runtime;
+        protected final Replacements replacements;
+        protected final Map<Invoke, Double> hints;
+
+        public AbstractInliningPolicy(Replacements replacements, Map<Invoke, Double> hints) {
             this.replacements = replacements;
             this.hints = hints;
         }
 
-        @Override
-        public boolean isWorthInlining(InlineInfo info, NodesToDoubles nodeProbabilities, NodesToDoubles nodeRelevance) {
-            /*
-             * TODO (chaeubl): invoked methods that are on important paths but not yet compiled ->
-             * will be compiled anyways and it is likely that we are the only caller... might be
-             * useful to inline those methods but increases bootstrap time (maybe those methods are
-             * also getting queued in the compilation queue concurrently)
-             */
-
-            if (GraalOptions.AlwaysInlineIntrinsics) {
-                if (onlyIntrinsics(replacements, info)) {
-                    return InliningUtil.logInlinedMethod(info, "intrinsic");
-                }
-            } else {
-                if (onlyForcedIntrinsics(replacements, info)) {
-                    return InliningUtil.logInlinedMethod(info, "intrinsic");
-                }
-            }
-
-            double bonus = 1;
-            if (hints != null && hints.containsKey(info.invoke())) {
-                bonus = hints.get(info.invoke());
-            }
-
-            int bytecodeSize = (int) (bytecodeCodeSize(info) / bonus);
-            int complexity = (int) (compilationComplexity(info) / bonus);
-            int compiledCodeSize = (int) (compiledCodeSize(info) / bonus);
-            double relevance = nodeRelevance.get(info.invoke().asNode());
-            /*
-             * as long as the compiled code size is small enough (or the method was not yet
-             * compiled), we can do a pretty general inlining that suits most situations
-             */
-            if (compiledCodeSize < GraalOptions.SmallCompiledCodeSize) {
-                if (isTrivialInlining(bytecodeSize, complexity, compiledCodeSize)) {
-                    return InliningUtil.logInlinedMethod(info, "trivial (bytecodes=%d, complexity=%d, codeSize=%d)", bytecodeSize, complexity, compiledCodeSize);
-                }
-
-                if (canInlineRelevanceBased(relevance, bytecodeSize, complexity, compiledCodeSize)) {
-                    return InliningUtil.logInlinedMethod(info, "relevance-based (relevance=%f, bytecodes=%d, complexity=%d, codeSize=%d)", relevance, bytecodeSize, complexity, compiledCodeSize);
-                }
-            }
-
-            /*
-             * the normal inlining did not fit this invoke, so check if we have any reason why we
-             * should still do the inlining
-             */
-            double probability = nodeProbabilities.get(info.invoke().asNode());
-            int transferredValues = numberOfTransferredValues(info);
-            int invokeUsages = countInvokeUsages(info);
-            int moreSpecificArguments = countMoreSpecificArgumentInfo(info);
-            int level = info.level();
-
-            // TODO (chaeubl): compute metric that is used to check if this method should be inlined
-
-            return InliningUtil.logNotInlinedMethod(info,
-                            "(relevance=%f, bytecodes=%d, complexity=%d, codeSize=%d, probability=%f, transferredValues=%d, invokeUsages=%d, moreSpecificArguments=%d, level=%d, bonus=%f)", relevance,
-                            bytecodeSize, complexity, compiledCodeSize, probability, transferredValues, invokeUsages, moreSpecificArguments, level, bonus);
-        }
-
-        private static boolean isTrivialInlining(int bytecodeSize, int complexity, int compiledCodeSize) {
-            return bytecodeSize < GraalOptions.TrivialBytecodeSize || complexity < GraalOptions.TrivialComplexity || compiledCodeSize > 0 && compiledCodeSize < GraalOptions.TrivialCompiledCodeSize;
-        }
-
-        private static boolean canInlineRelevanceBased(double relevance, int bytecodeSize, int complexity, int compiledCodeSize) {
-            return bytecodeSize < computeMaximumSize(relevance, GraalOptions.NormalBytecodeSize) || complexity < computeMaximumSize(relevance, GraalOptions.NormalComplexity) || compiledCodeSize > 0 &&
-                            compiledCodeSize < computeMaximumSize(relevance, GraalOptions.NormalCompiledCodeSize);
-        }
-
-        private static double computeMaximumSize(double relevance, int configuredMaximum) {
+        protected double computeMaximumSize(double relevance, int configuredMaximum) {
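+            // Hedged worked example: with relevance = 0.5 (and a relevance cap of at
+            // least 0.5) and configuredMaximum = 100, the budget is 100 * 0.5 = 50.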
             double inlineRatio = Math.min(GraalOptions.RelevanceCapForInlining, relevance);
             return configuredMaximum * inlineRatio;
         }
 
-        private static int numberOfTransferredValues(InlineInfo info) {
-            MethodCallTargetNode methodCallTargetNode = ((MethodCallTargetNode) info.invoke().callTarget());
-            Signature signature = methodCallTargetNode.targetMethod().getSignature();
-            int transferredValues = signature.getParameterCount(!Modifier.isStatic(methodCallTargetNode.targetMethod().getModifiers()));
-            if (signature.getReturnKind() != Kind.Void) {
-                transferredValues++;
+        protected double getInliningBonus(InlineInfo info) {
+            if (hints != null && hints.containsKey(info.invoke())) {
+                return hints.get(info.invoke());
             }
-            return transferredValues;
-        }
-
-        private static int countInvokeUsages(InlineInfo info) {
-            // inlining calls with lots of usages simplifies the caller
-            int usages = 0;
-            for (Node n : info.invoke().asNode().usages()) {
-                if (!(n instanceof FrameState)) {
-                    usages++;
-                }
-            }
-            return usages;
+            return 1;
         }
 
-        private int countMoreSpecificArgumentInfo(InlineInfo info) {
-            /*
-             * inlining invokes where the caller has very specific information about the passed
-             * argument simplifies the callee
-             */
-            int moreSpecificArgumentInfo = 0;
-            MethodCallTargetNode methodCallTarget = (MethodCallTargetNode) info.invoke().callTarget();
-            boolean isStatic = methodCallTarget.isStatic();
-            int signatureOffset = isStatic ? 0 : 1;
-            NodeInputList arguments = methodCallTarget.arguments();
-            ResolvedJavaMethod targetMethod = methodCallTarget.targetMethod();
-            ResolvedJavaType methodHolderClass = targetMethod.getDeclaringClass();
-            Signature signature = targetMethod.getSignature();
-
-            for (int i = 0; i < arguments.size(); i++) {
-                Node n = arguments.get(i);
-                if (n instanceof ConstantNode) {
-                    moreSpecificArgumentInfo++;
-                } else if (n instanceof ValueNode && !((ValueNode) n).kind().isPrimitive()) {
-                    ResolvedJavaType actualType = ((ValueNode) n).stamp().javaType(runtime);
-                    JavaType declaredType;
-                    if (i == 0 && !isStatic) {
-                        declaredType = methodHolderClass;
-                    } else {
-                        declaredType = signature.getParameterType(i - signatureOffset, methodHolderClass);
-                    }
-
-                    if (declaredType instanceof ResolvedJavaType && !actualType.equals(declaredType) && ((ResolvedJavaType) declaredType).isAssignableFrom(actualType)) {
-                        moreSpecificArgumentInfo++;
-                    }
-                }
-
+        protected boolean isIntrinsic(InlineInfo info) {
+            if (GraalOptions.AlwaysInlineIntrinsics) {
+                return onlyIntrinsics(info);
+            } else {
+                return onlyForcedIntrinsics(info);
             }
-
-            return moreSpecificArgumentInfo;
         }
 
-        private static int bytecodeCodeSize(InlineInfo info) {
-            int result = 0;
-            for (int i = 0; i < info.numberOfMethods(); i++) {
-                result += info.methodAt(i).getCodeSize();
-            }
-            return result;
-        }
-
-        private static int compilationComplexity(InlineInfo info) {
-            int result = 0;
-            for (int i = 0; i < info.numberOfMethods(); i++) {
-                result += info.methodAt(i).getCompilationComplexity();
-            }
-            return result;
-        }
-
-        private static int compiledCodeSize(InlineInfo info) {
-            int result = 0;
-            for (int i = 0; i < info.numberOfMethods(); i++) {
-                result += info.methodAt(i).getCompiledCodeSize();
-            }
-            return result;
-        }
-
-        private static boolean onlyIntrinsics(Replacements replacements, InlineInfo info) {
+        private boolean onlyIntrinsics(InlineInfo info) {
             for (int i = 0; i < info.numberOfMethods(); i++) {
                 if (!InliningUtil.canIntrinsify(replacements, info.methodAt(i))) {
                     return false;
@@ -362,7 +346,7 @@
             return true;
         }
 
-        private static boolean onlyForcedIntrinsics(Replacements replacements, InlineInfo info) {
+        private boolean onlyForcedIntrinsics(InlineInfo info) {
             for (int i = 0; i < info.numberOfMethods(); i++) {
                 if (!InliningUtil.canIntrinsify(replacements, info.methodAt(i))) {
                     return false;
@@ -373,84 +357,99 @@
             }
             return true;
         }
+
+        protected static int previousLowLevelGraphSize(InlineInfo info) {
+            int size = 0;
+            for (int i = 0; i < info.numberOfMethods(); i++) {
+                size += compiledMethodInfo(info.methodAt(i)).lowLevelNodeCount();
+            }
+            return size;
+        }
+
+        protected static int determineNodeCount(InlineInfo info) {
+            int nodes = 0;
+            for (int i = 0; i < info.numberOfMethods(); i++) {
+                InlineableElement elem = info.inlineableElementAt(i);
+                if (elem != null) {
+                    nodes += elem.getNodeCount();
+                }
+            }
+            return nodes;
+        }
+
+        protected static double determineInvokeProbability(InlineInfo info) {
+            double invokeProbability = 0;
+            for (int i = 0; i < info.numberOfMethods(); i++) {
+                InlineableElement callee = info.inlineableElementAt(i);
+                Iterable<Invoke> invokes = callee.getInvokes();
+                if (invokes.iterator().hasNext()) {
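+                    // only a StructuredGraph callee reports invokes (InlineableMacroNode returns an empty list), so this cast is safe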
+                    NodesToDoubles nodeProbabilities = new ComputeProbabilityClosure((StructuredGraph) callee).apply();
+                    for (Invoke invoke : invokes) {
+                        invokeProbability += nodeProbabilities.get(invoke.asNode());
+                    }
+                }
+            }
+            return invokeProbability;
+        }
     }
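
The helper above sums, over all candidate callees, the execution probability of every invoke the callee still contains; the result is later compared against LimitInlinedInvokes. A toy restatement with hypothetical per-invoke probabilities standing in for ComputeProbabilityClosure:

    import java.util.Arrays;
    import java.util.List;

    public class InvokeProbabilityExample {
        public static void main(String[] args) {
            // hypothetical per-invoke probabilities for three candidate callees
            List<double[]> calleeInvokeProbabilities = Arrays.asList(
                            new double[]{1.0, 0.5}, // two plain invokes
                            new double[]{},         // a leaf method without invokes
                            new double[]{3.0});     // one invoke inside a hot loop
            double invokeProbability = 0;
            for (double[] probabilities : calleeInvokeProbabilities) {
                for (double p : probabilities) {
                    invokeProbability += p;
                }
            }
            System.out.println(invokeProbability); // 4.5, checked against LimitInlinedInvokes * bonus
        }
    }
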
 
-    private static class CFInliningPolicy implements InliningPolicy {
+    private static class GreedyInliningPolicy extends AbstractInliningPolicy {
 
-        private final InliningDecision inliningDecision;
-        private final Assumptions assumptions;
-        private final Replacements replacements;
-        private final OptimisticOptimizations optimisticOpts;
-        private final Deque<Invoke> sortedInvokes;
-        private NodeBitMap visitedFixedNodes;
-        private FixedNode invokePredecessor;
-
-        public CFInliningPolicy(InliningDecision inliningPolicy, Replacements replacements, Assumptions assumptions, OptimisticOptimizations optimisticOpts) {
-            this.inliningDecision = inliningPolicy;
-            this.replacements = replacements;
-            this.assumptions = assumptions;
-            this.optimisticOpts = optimisticOpts;
-            this.sortedInvokes = new ArrayDeque<>();
+        public GreedyInliningPolicy(Replacements replacements, Map<Invoke, Double> hints) {
+            super(replacements, hints);
         }
 
-        public boolean continueInlining(StructuredGraph graph) {
-            if (graph.getNodeCount() >= GraalOptions.MaximumDesiredSize) {
+        public boolean continueInlining(InliningData data) {
+            if (data.currentGraph().graph().getNodeCount() >= GraalOptions.MaximumDesiredSize) {
                 InliningUtil.logInliningDecision("inlining is cut off by MaximumDesiredSize");
                 metricInliningStoppedByMaxDesiredSize.increment();
                 return false;
             }
 
-            return !sortedInvokes.isEmpty();
-        }
+            MethodInvocation currentInvocation = data.currentInvocation();
+            if (currentInvocation == null) {
+                return true;
+            }
 
-        public InlineInfo next() {
-            Invoke invoke = sortedInvokes.pop();
-            InlineInfo info = InliningUtil.getInlineInfo(invoke, replacements, assumptions, optimisticOpts);
-            if (info != null) {
-                invokePredecessor = (FixedNode) info.invoke().predecessor();
-                assert invokePredecessor.isAlive();
-            }
-            return info;
+            return isWorthInlining(currentInvocation.callee(), currentInvocation.probability(), currentInvocation.relevance(), false);
         }
 
         @Override
-        public boolean isWorthInlining(InlineInfo info, NodesToDoubles nodeProbabilities, NodesToDoubles nodeRelevance) {
-            return inliningDecision.isWorthInlining(info, nodeProbabilities, nodeRelevance);
-        }
-
-        public void initialize(StructuredGraph graph) {
-            visitedFixedNodes = graph.createNodeBitMap(true);
-            scanGraphForInvokes(graph.start());
-        }
+        public boolean isWorthInlining(InlineInfo info, double probability, double relevance, boolean fullyProcessed) {
+            if (isIntrinsic(info)) {
+                return InliningUtil.logInlinedMethod(info, fullyProcessed, "intrinsic");
+            }
 
-        public void scanInvokes(Iterable<? extends Node> newNodes) {
-            assert invokePredecessor.isAlive();
-            int invokes = scanGraphForInvokes(invokePredecessor);
-            assert invokes == countInvokes(newNodes);
-        }
+            double inliningBonus = getInliningBonus(info);
 
-        private int scanGraphForInvokes(FixedNode start) {
-            ArrayList<Invoke> invokes = new InliningIterator(start, visitedFixedNodes).apply();
-
-            // insert the newly found invokes in their correct control-flow order
-            for (int i = invokes.size() - 1; i >= 0; i--) {
-                Invoke invoke = invokes.get(i);
-                assert !sortedInvokes.contains(invoke);
-                sortedInvokes.addFirst(invoke);
-
+            int lowLevelGraphSize = previousLowLevelGraphSize(info);
+            if (GraalOptions.SmallCompiledLowLevelGraphSize > 0 && lowLevelGraphSize > GraalOptions.SmallCompiledLowLevelGraphSize * inliningBonus) {
+                return InliningUtil.logNotInlinedMethod(info, "the previous low-level graph is too large: %d", lowLevelGraphSize);
             }
 
-            return invokes.size();
-        }
+            /*
+             * TODO (chaeubl): invoked methods that are on important paths but not yet compiled
+             * will be compiled anyway, and it is likely that we are the only caller. It might be
+             * useful to inline those methods, but doing so increases bootstrap time (those methods
+             * may also be queued in the compilation queue concurrently).
+             */
+
+            int nodes = determineNodeCount(info);
+            if (nodes < GraalOptions.TrivialInliningSize * inliningBonus) {
+                return InliningUtil.logInlinedMethod(info, fullyProcessed, "trivial (nodes=%d)", nodes);
+            }
 
-        private static int countInvokes(Iterable<? extends Node> nodes) {
-            int count = 0;
-            for (Node n : nodes) {
-                if (n instanceof Invoke) {
-                    count++;
-                }
+            double invokes = determineInvokeProbability(info);
+            if (GraalOptions.LimitInlinedInvokes > 0 && fullyProcessed && invokes > GraalOptions.LimitInlinedInvokes * inliningBonus) {
+                return InliningUtil.logNotInlinedMethod(info, "invoke probability is too high (%f)", invokes);
             }
-            return count;
+
+            double maximumNodes = computeMaximumSize(relevance, (int) (GraalOptions.MaximumInliningSize * inliningBonus));
+            if (nodes < maximumNodes) {
+                return InliningUtil.logInlinedMethod(info, fullyProcessed, "relevance-based (relevance=%f, nodes=%d)", relevance, nodes);
+            }
+
+            return InliningUtil.logNotInlinedMethod(info, "(relevance=%f, probability=%f, bonus=%f)", relevance, probability, inliningBonus);
         }
     }
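
The isWorthInlining cascade above can be summarized as a standalone decision function. This is a sketch only: the thresholds are the GraalOptions defaults from this changeset (TrivialInliningSize = 10, MaximumInliningSize = 300, SmallCompiledLowLevelGraphSize = 300, LimitInlinedInvokes = 10.0), the guards for disabled options are omitted, and all inputs the real code derives from the graph are plain parameters here:

    public class GreedyDecisionSketch {
        static final int TRIVIAL_INLINING_SIZE = 10;
        static final int MAXIMUM_INLINING_SIZE = 300;
        static final int SMALL_COMPILED_LOW_LEVEL_GRAPH_SIZE = 300;
        static final double LIMIT_INLINED_INVOKES = 10.0;
        static final double RELEVANCE_CAP_FOR_INLINING = 1.0;

        static boolean isWorthInlining(boolean intrinsic, double bonus, int lowLevelGraphSize,
                        int nodes, double invokes, double relevance, boolean fullyProcessed) {
            if (intrinsic) {
                return true;                                               // intrinsics are always inlined
            }
            if (lowLevelGraphSize > SMALL_COMPILED_LOW_LEVEL_GRAPH_SIZE * bonus) {
                return false;                                              // previous compilation was too big
            }
            if (nodes < TRIVIAL_INLINING_SIZE * bonus) {
                return true;                                               // trivial
            }
            if (fullyProcessed && invokes > LIMIT_INLINED_INVOKES * bonus) {
                return false;                                              // too many residual calls
            }
            double maximumNodes = Math.min(RELEVANCE_CAP_FOR_INLINING, relevance)
                            * (int) (MAXIMUM_INLINING_SIZE * bonus);
            return nodes < maximumNodes;                                   // relevance-based size limit
        }

        public static void main(String[] args) {
            System.out.println(isWorthInlining(false, 1.0, 100, 120, 2.0, 0.5, true)); // true: 120 < 150
            System.out.println(isWorthInlining(false, 1.0, 100, 200, 2.0, 0.5, true)); // false: 200 >= 150
        }
    }
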
 
@@ -467,8 +466,8 @@
             assert start.isAlive();
         }
 
-        public ArrayList<Invoke> apply() {
-            ArrayList<Invoke> invokes = new ArrayList<>();
+        public Stack<Invoke> apply() {
+            Stack<Invoke> invokes = new Stack<>();
             FixedNode current;
             forcedQueue(start);
 
@@ -477,7 +476,7 @@
 
                 if (current instanceof Invoke) {
                     if (current != start) {
-                        invokes.add((Invoke) current);
+                        invokes.push((Invoke) current);
                     }
                     queueSuccessors(current);
                 } else if (current instanceof LoopBeginNode) {
@@ -547,8 +546,234 @@
         }
     }
 
-    private static InliningPolicy createInliningPolicy(MetaAccessProvider runtime, Replacements replacements, Assumptions assumptions, OptimisticOptimizations optimisticOpts, Map<Invoke, Double> hints) {
-        InliningDecision inliningDecision = new GreedySizeBasedInliningDecision(runtime, replacements, hints);
-        return new CFInliningPolicy(inliningDecision, replacements, assumptions, optimisticOpts);
+    /**
+     * Holds the data for building the callee graphs recursively: graphs and invocations (each
+     * invocation can have multiple graphs).
+     */
+    static class InliningData {
+
+        private static final GraphInfo DummyGraphInfo = new GraphInfo(null, new Stack<Invoke>(), 1.0, 1.0);
+
+        private final ArrayDeque<GraphInfo> graphQueue;
+        private final ArrayDeque<MethodInvocation> invocationQueue;
+
+        private int maxGraphs = 1;
+
+        public InliningData() {
+            this.graphQueue = new ArrayDeque<>();
+            this.invocationQueue = new ArrayDeque<>();
+        }
+
+        public void pushGraph(StructuredGraph graph, double probability, double relevance) {
+            assert !contains(graph);
+            NodeBitMap visitedFixedNodes = graph.createNodeBitMap();
+            Stack<Invoke> invokes = new InliningIterator(graph.start(), visitedFixedNodes).apply();
+            assert invokes.size() == count(graph.getInvokes());
+            graphQueue.push(new GraphInfo(graph, invokes, probability, relevance));
+            assert graphQueue.size() <= maxGraphs;
+        }
+
+        public void pushDummyGraph() {
+            graphQueue.push(DummyGraphInfo);
+        }
+
+        public boolean hasUnprocessedGraphs() {
+            return !graphQueue.isEmpty();
+        }
+
+        public GraphInfo currentGraph() {
+            return graphQueue.peek();
+        }
+
+        public void popGraph() {
+            graphQueue.pop();
+            assert graphQueue.size() <= maxGraphs;
+        }
+
+        public MethodInvocation currentInvocation() {
+            return invocationQueue.peek();
+        }
+
+        public MethodInvocation pushInvocation(InlineInfo info, Assumptions assumptions, double probability, double relevance) {
+            MethodInvocation methodInvocation = new MethodInvocation(info, new Assumptions(assumptions.useOptimisticAssumptions()), probability, relevance);
+            invocationQueue.push(methodInvocation);
+            maxGraphs += info.numberOfMethods();
+            assert graphQueue.size() <= maxGraphs;
+            return methodInvocation;
+        }
+
+        public void popInvocation() {
+            maxGraphs -= invocationQueue.peek().callee.numberOfMethods();
+            assert graphQueue.size() <= maxGraphs;
+            invocationQueue.pop();
+        }
+
+        public int countRecursiveInlining(ResolvedJavaMethod method) {
+            int count = 0;
+            for (GraphInfo graphInfo : graphQueue) {
+                if (method.equals(graphInfo.method())) {
+                    count++;
+                }
+            }
+            return count;
+        }
+
+        public int inliningDepth() {
+            return invocationQueue.size();
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder result = new StringBuilder("Invocations: ");
+
+            for (MethodInvocation invocation : invocationQueue) {
+                result.append(invocation.callee().numberOfMethods());
+                result.append("x ");
+                result.append(invocation.callee().invoke());
+                result.append("; ");
+            }
+
+            result.append("\nGraphs: ");
+            for (GraphInfo graph : graphQueue) {
+                result.append(graph.graph());
+                result.append("; ");
+            }
+
+            return result.toString();
+        }
+
+        private boolean contains(StructuredGraph graph) {
+            for (GraphInfo info : graphQueue) {
+                if (info.graph() == graph) {
+                    return true;
+                }
+            }
+            return false;
+        }
+
+        private static int count(Iterable<Invoke> invokes) {
+            int count = 0;
+            Iterator<Invoke> iterator = invokes.iterator();
+            while (iterator.hasNext()) {
+                iterator.next();
+                count++;
+            }
+            return count;
+        }
+    }
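
As a rough picture of the two work queues, the following toy model (hypothetical class and graph names; the real class stores GraphInfo and MethodInvocation objects) shows the push/pop discipline: pushing an invocation raises the bound on how many graphs may sit on the graph stack, one per candidate method:

    import java.util.ArrayDeque;

    public class InliningDataSketch {
        private final ArrayDeque<String> graphQueue = new ArrayDeque<>();
        private final ArrayDeque<Integer> invocationQueue = new ArrayDeque<>(); // methods per invocation
        private int maxGraphs = 1;

        void pushGraph(String graph) {
            graphQueue.push(graph);
            assert graphQueue.size() <= maxGraphs;
        }

        void pushInvocation(int numberOfMethods) {
            invocationQueue.push(numberOfMethods);
            maxGraphs += numberOfMethods;
        }

        void popInvocation() {
            maxGraphs -= invocationQueue.pop();
            assert graphQueue.size() <= maxGraphs;
        }

        public static void main(String[] args) {
            InliningDataSketch data = new InliningDataSketch();
            data.pushGraph("root");        // the compilation root
            data.pushInvocation(2);        // a polymorphic call with two candidates
            data.pushGraph("calleeA");
            data.pushGraph("calleeB");
            data.graphQueue.pop();         // ... both callee graphs processed ...
            data.graphQueue.pop();
            data.popInvocation();
            System.out.println(data.graphQueue); // [root]
        }
    }
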
+
+    private static class MethodInvocation {
+
+        private final InlineInfo callee;
+        private final Assumptions assumptions;
+        private final double probability;
+        private final double relevance;
+
+        private int processedGraphs;
+
+        public MethodInvocation(InlineInfo info, Assumptions assumptions, double probability, double relevance) {
+            this.callee = info;
+            this.assumptions = assumptions;
+            this.probability = probability;
+            this.relevance = relevance;
+        }
+
+        public void incrementProcessedGraphs() {
+            processedGraphs++;
+            assert processedGraphs <= callee.numberOfMethods();
+        }
+
+        public boolean processedAllGraphs() {
+            assert processedGraphs <= callee.numberOfMethods();
+            return processedGraphs == callee.numberOfMethods();
+        }
+
+        public InlineInfo callee() {
+            return callee;
+        }
+
+        public Assumptions assumptions() {
+            return assumptions;
+        }
+
+        public double probability() {
+            return probability;
+        }
+
+        public double relevance() {
+            return relevance;
+        }
+    }
+
+    private static class GraphInfo {
+
+        private final StructuredGraph graph;
+        private final Stack<Invoke> remainingInvokes;
+        private final double probability;
+        private final double relevance;
+
+        private NodesToDoubles nodeProbabilities;
+        private NodesToDoubles nodeRelevance;
+
+        public GraphInfo(StructuredGraph graph, Stack<Invoke> invokes, double probability, double relevance) {
+            this.graph = graph;
+            this.remainingInvokes = invokes;
+            this.probability = probability;
+            this.relevance = relevance;
+
+            if (graph != null) {
+                computeProbabilities();
+            }
+        }
+
+        public ResolvedJavaMethod method() {
+            return graph.method();
+        }
+
+        public boolean hasRemainingInvokes() {
+            return !remainingInvokes.isEmpty();
+        }
+
+        public StructuredGraph graph() {
+            return graph;
+        }
+
+        public Invoke popInvoke() {
+            return remainingInvokes.pop();
+        }
+
+        public void pushInvoke(Invoke invoke) {
+            remainingInvokes.push(invoke);
+        }
+
+        public void computeProbabilities() {
+            nodeProbabilities = new ComputeProbabilityClosure(graph).apply();
+            nodeRelevance = new ComputeInliningRelevanceClosure(graph, nodeProbabilities).apply();
+        }
+
+        public double invokeProbability(Invoke invoke) {
+            return probability * nodeProbabilities.get(invoke.asNode());
+        }
+
+        public double invokeRelevance(Invoke invoke) {
+            return Math.min(GraalOptions.CapInheritedRelevance, relevance) * nodeRelevance.get(invoke.asNode());
+        }
+    }
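
The inherited scaling in invokeProbability/invokeRelevance can be checked by hand. A small sketch, using the CapInheritedRelevance default of 1f from this changeset; the remaining numbers are hypothetical inputs in place of the closure results:

    public class GraphInfoScalingExample {
        public static void main(String[] args) {
            // hypothetical: this callee graph was itself reached with probability 0.4, relevance 1.5
            double probability = 0.4, relevance = 1.5;
            double capInheritedRelevance = 1.0; // GraalOptions.CapInheritedRelevance
            // hypothetical values the probability/relevance closures would report for one invoke
            double nodeProbability = 2.0, nodeRelevance = 0.6;

            System.out.println(probability * nodeProbability);                            // 0.8
            System.out.println(Math.min(capInheritedRelevance, relevance) * nodeRelevance); // 0.6
        }
    }
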
+
+    private static class CompiledMethodInfo {
+
+        private int lowLevelNodes;
+
+        public CompiledMethodInfo() {
+        }
+
+        public int lowLevelNodeCount() {
+            return lowLevelNodes;
+        }
+
+        public void setLowLevelNodeCount(int lowLevelNodes) {
+            this.lowLevelNodes = lowLevelNodes;
+        }
+
     }
 }
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningUtil.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningUtil.java	Wed May 15 14:53:34 2013 +0200
@@ -43,13 +43,13 @@
 import com.oracle.graal.nodes.type.*;
 import com.oracle.graal.nodes.util.*;
 import com.oracle.graal.phases.*;
+import com.oracle.graal.phases.common.InliningPhase.InliningData;
 import com.oracle.graal.phases.tiers.*;
 
 public class InliningUtil {
 
     private static final DebugMetric metricInliningTailDuplication = Debug.metric("InliningTailDuplication");
     private static final String inliningDecisionsScopeString = "InliningDecisions";
-
     /**
      * Meters the size (in bytecodes) of all methods processed during compilation (i.e., top level
      * and all inlined methods), irrespective of how many bytecodes in each method are actually
@@ -57,22 +57,34 @@
      */
     public static final DebugMetric InlinedBytecodes = Debug.metric("InlinedBytecodes");
 
-    public interface InliningCallback {
-
-        StructuredGraph buildGraph(final ResolvedJavaMethod method);
-    }
-
     public interface InliningPolicy {
 
-        void initialize(StructuredGraph graph);
+        boolean continueInlining(InliningData data);
+
+        boolean isWorthInlining(InlineInfo info, double probability, double relevance, boolean fullyProcessed);
+    }
 
-        boolean continueInlining(StructuredGraph graph);
+    public static class InlineableMacroNode implements InlineableElement {
+
+        private final Class<? extends FixedWithNextNode> macroNodeClass;
+
+        public InlineableMacroNode(Class<? extends FixedWithNextNode> macroNodeClass) {
+            this.macroNodeClass = macroNodeClass;
+        }
 
-        InlineInfo next();
+        @Override
+        public int getNodeCount() {
+            return 1;
+        }
 
-        void scanInvokes(Iterable<? extends Node> newNodes);
+        @Override
+        public Iterable<Invoke> getInvokes() {
+            return Collections.emptyList();
+        }
 
-        boolean isWorthInlining(InlineInfo info, NodesToDoubles nodeProbabilities, NodesToDoubles nodeRelevance);
+        public Class<? extends FixedWithNextNode> getMacroNodeClass() {
+            return macroNodeClass;
+        }
     }
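
For orientation, the reshaped InliningPolicy contract boils down to two callbacks that the driver consults in a loop. A self-contained mirror with a trivial implementation (the parameter types are Object stand-ins for InliningData and InlineInfo; only the control flow is of interest):

    public class PolicySketch {

        interface InliningPolicy {
            boolean continueInlining(Object data);
            boolean isWorthInlining(Object info, double probability, double relevance, boolean fullyProcessed);
        }

        static class NeverInline implements InliningPolicy {
            public boolean continueInlining(Object data) {
                return false; // the driver stops iterating immediately
            }
            public boolean isWorthInlining(Object info, double probability, double relevance, boolean fullyProcessed) {
                return false;
            }
        }

        public static void main(String[] args) {
            InliningPolicy policy = new NeverInline();
            System.out.println(policy.continueInlining(null)); // false
        }
    }
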
 
     /**
@@ -104,21 +116,22 @@
         }
     }
 
-    public static boolean logInlinedMethod(InlineInfo info, String msg, Object... args) {
-        logInliningDecision(info, true, msg, args);
-        return true;
+    public static boolean logInlinedMethod(InlineInfo info, boolean allowLogging, String msg, Object... args) {
+        return logInliningDecision(info, allowLogging, true, msg, args);
     }
 
     public static boolean logNotInlinedMethod(InlineInfo info, String msg, Object... args) {
-        logInliningDecision(info, false, msg, args);
-        return false;
+        return logInliningDecision(info, true, false, msg, args);
     }
 
-    public static void logInliningDecision(InlineInfo info, boolean success, String msg, final Object... args) {
-        printInlining(info, success, msg, args);
-        if (shouldLogInliningDecision()) {
-            logInliningDecision(methodName(info), success, msg, args);
+    public static boolean logInliningDecision(InlineInfo info, boolean allowLogging, boolean success, String msg, final Object... args) {
+        if (allowLogging) {
+            printInlining(info, success, msg, args);
+            if (shouldLogInliningDecision()) {
+                logInliningDecision(methodName(info), success, msg, args);
+            }
         }
+        return success;
     }
 
     public static void logInliningDecision(final String msg, final Object... args) {
@@ -130,7 +143,7 @@
         });
     }
 
-    private static boolean logNotInlinedMethodAndReturnFalse(Invoke invoke, String msg) {
+    private static boolean logNotInlinedMethod(Invoke invoke, String msg) {
         if (shouldLogInliningDecision()) {
             String methodString = invoke.toString() + (invoke.callTarget() == null ? " callTarget=null" : invoke.callTarget().targetName());
             logInliningDecision(methodString, false, msg, new Object[0]);
@@ -213,25 +226,36 @@
      */
     public interface InlineInfo {
 
+        StructuredGraph graph();
+
         Invoke invoke();
 
-        int level();
-
+        /**
+         * Returns the number of invoked methods.
+         */
         int numberOfMethods();
 
         ResolvedJavaMethod methodAt(int index);
 
+        InlineableElement inlineableElementAt(int index);
+
+        double probabilityAt(int index);
+
+        double relevanceAt(int index);
+
+        void setInlinableElement(int index, InlineableElement inlineableElement);
+
         /**
          * Performs the inlining described by this object, replacing the invoke in the
          * containing graph with the code of the inlined method.
          */
-        void inline(StructuredGraph graph, MetaAccessProvider runtime, Replacements replacements, InliningCallback callback, Assumptions assumptions);
+        void inline(MetaAccessProvider runtime, Assumptions assumptions, Replacements replacements);
 
         /**
          * Tries to make the call statically bindable to avoid interface and virtual method calls.
          */
-        void tryToDevirtualizeInvoke(StructuredGraph graph, MetaAccessProvider runtime, Assumptions assumptions);
+        void tryToDevirtualizeInvoke(MetaAccessProvider runtime, Assumptions assumptions);
     }
 
     public abstract static class AbstractInlineInfo implements InlineInfo {
@@ -243,29 +267,40 @@
         }
 
         @Override
+        public StructuredGraph graph() {
+            return invoke.asNode().graph();
+        }
+
+        @Override
         public Invoke invoke() {
             return invoke;
         }
 
-        @Override
-        public int level() {
-            return computeInliningLevel(invoke);
-        }
+        protected static void inline(Invoke invoke, ResolvedJavaMethod concrete, InlineableElement inlineable, Assumptions assumptions, boolean receiverNullCheck) {
+            StructuredGraph graph = invoke.asNode().graph();
+            if (inlineable instanceof StructuredGraph) {
+                StructuredGraph calleeGraph = (StructuredGraph) inlineable;
+                InliningUtil.inline(invoke, calleeGraph, receiverNullCheck);
 
-        protected static void inline(Invoke invoke, ResolvedJavaMethod concrete, InliningCallback callback, Replacements replacements, Assumptions assumptions, boolean receiverNullCheck) {
-            Class<? extends FixedWithNextNode> macroNodeClass = getMacroNodeClass(replacements, concrete);
-            StructuredGraph graph = invoke.asNode().graph();
-            if (macroNodeClass != null) {
+                graph.getLeafGraphIds().add(calleeGraph.graphId());
+                // we might at some point cache already-inlined graphs, so add recursively:
+                graph.getLeafGraphIds().addAll(calleeGraph.getLeafGraphIds());
+            } else {
+                assert inlineable instanceof InlineableMacroNode;
+
+                Class<? extends FixedWithNextNode> macroNodeClass = ((InlineableMacroNode) inlineable).getMacroNodeClass();
                 if (((MethodCallTargetNode) invoke.callTarget()).targetMethod() != concrete) {
                     assert ((MethodCallTargetNode) invoke.callTarget()).invokeKind() != InvokeKind.Static;
                     InliningUtil.replaceInvokeCallTarget(invoke, graph, InvokeKind.Special, concrete);
                 }
+
                 FixedWithNextNode macroNode;
                 try {
                     macroNode = macroNodeClass.getConstructor(Invoke.class).newInstance(invoke);
                 } catch (ReflectiveOperationException | IllegalArgumentException | SecurityException e) {
                     throw new GraalInternalError(e).addContext(invoke.asNode()).addContext("macroSubstitution", macroNodeClass);
                 }
+
                 CallTargetNode callTarget = invoke.callTarget();
                 if (invoke instanceof InvokeNode) {
                     graph.replaceFixedWithFixed((InvokeNode) invoke, graph.add(macroNode));
@@ -275,30 +310,10 @@
                     graph.replaceSplitWithFixed(invokeWithException, graph.add(macroNode), invokeWithException.next());
                 }
                 GraphUtil.killWithUnusedFloatingInputs(callTarget);
-            } else {
-                StructuredGraph calleeGraph = getIntrinsicGraph(replacements, concrete);
-                if (calleeGraph == null) {
-                    calleeGraph = getGraph(concrete, callback);
-                }
-                InlinedBytecodes.add(concrete.getCodeSize());
-                assumptions.recordMethodContents(concrete);
-                InliningUtil.inline(invoke, calleeGraph, receiverNullCheck);
+            }
 
-                graph.getLeafGraphIds().add(calleeGraph.graphId());
-                // we might at some point cache already-inlined graphs, so add recursively:
-                graph.getLeafGraphIds().addAll(calleeGraph.getLeafGraphIds());
-            }
-        }
-
-        protected static StructuredGraph getGraph(final ResolvedJavaMethod concrete, final InliningCallback callback) {
-            return Debug.scope("GetInliningGraph", concrete, new Callable<StructuredGraph>() {
-
-                @Override
-                public StructuredGraph call() throws Exception {
-                    assert !Modifier.isNative(concrete.getModifiers());
-                    return callback.buildGraph(concrete);
-                }
-            });
+            InlinedBytecodes.add(concrete.getCodeSize());
+            assumptions.recordMethodContents(concrete);
         }
     }
 
@@ -314,7 +329,8 @@
      */
     private static class ExactInlineInfo extends AbstractInlineInfo {
 
-        public final ResolvedJavaMethod concrete;
+        protected final ResolvedJavaMethod concrete;
+        private InlineableElement inlineableElement;
 
         public ExactInlineInfo(Invoke invoke, ResolvedJavaMethod concrete) {
             super(invoke);
@@ -322,12 +338,12 @@
         }
 
         @Override
-        public void inline(StructuredGraph compilerGraph, MetaAccessProvider runtime, Replacements replacements, InliningCallback callback, Assumptions assumptions) {
-            inline(invoke, concrete, callback, replacements, assumptions, true);
+        public void inline(MetaAccessProvider runtime, Assumptions assumptions, Replacements replacements) {
+            inline(invoke, concrete, inlineableElement, assumptions, true);
         }
 
         @Override
-        public void tryToDevirtualizeInvoke(StructuredGraph graph, MetaAccessProvider runtime, Assumptions assumptions) {
+        public void tryToDevirtualizeInvoke(MetaAccessProvider runtime, Assumptions assumptions) {
             // nothing to do, the call can already be bound statically
         }
 
@@ -343,9 +359,33 @@
         }
 
         @Override
+        public double probabilityAt(int index) {
+            assert index == 0;
+            return 1.0;
+        }
+
+        @Override
+        public double relevanceAt(int index) {
+            assert index == 0;
+            return 1.0;
+        }
+
+        @Override
         public String toString() {
             return "exact " + MetaUtil.format("%H.%n(%p):%r", concrete);
         }
+
+        @Override
+        public InlineableElement inlineableElementAt(int index) {
+            assert index == 0;
+            return inlineableElement;
+        }
+
+        @Override
+        public void setInlinableElement(int index, InlineableElement inlineableElement) {
+            assert index == 0;
+            this.inlineableElement = inlineableElement;
+        }
     }
 
     /**
@@ -355,8 +395,9 @@
      */
     private static class TypeGuardInlineInfo extends AbstractInlineInfo {
 
-        public final ResolvedJavaMethod concrete;
-        public final ResolvedJavaType type;
+        private final ResolvedJavaMethod concrete;
+        private final ResolvedJavaType type;
+        private InlineableElement inlineableElement;
 
         public TypeGuardInlineInfo(Invoke invoke, ResolvedJavaMethod concrete, ResolvedJavaType type) {
             super(invoke);
@@ -376,15 +417,39 @@
         }
 
         @Override
-        public void inline(StructuredGraph graph, MetaAccessProvider runtime, Replacements replacements, InliningCallback callback, Assumptions assumptions) {
-            createGuard(graph, runtime);
-            inline(invoke, concrete, callback, replacements, assumptions, false);
+        public InlineableElement inlineableElementAt(int index) {
+            assert index == 0;
+            return inlineableElement;
+        }
+
+        @Override
+        public double probabilityAt(int index) {
+            assert index == 0;
+            return 1.0;
         }
 
         @Override
-        public void tryToDevirtualizeInvoke(StructuredGraph graph, MetaAccessProvider runtime, Assumptions assumptions) {
-            createGuard(graph, runtime);
-            replaceInvokeCallTarget(invoke, graph, InvokeKind.Special, concrete);
+        public double relevanceAt(int index) {
+            assert index == 0;
+            return 1.0;
+        }
+
+        @Override
+        public void setInlinableElement(int index, InlineableElement inlineableElement) {
+            assert index == 0;
+            this.inlineableElement = inlineableElement;
+        }
+
+        @Override
+        public void inline(MetaAccessProvider runtime, Assumptions assumptions, Replacements replacements) {
+            createGuard(graph(), runtime);
+            inline(invoke, concrete, inlineableElement, assumptions, false);
+        }
+
+        @Override
+        public void tryToDevirtualizeInvoke(MetaAccessProvider runtime, Assumptions assumptions) {
+            createGuard(graph(), runtime);
+            replaceInvokeCallTarget(invoke, graph(), InvokeKind.Special, concrete);
         }
 
         private void createGuard(StructuredGraph graph, MetaAccessProvider runtime) {
@@ -418,11 +483,14 @@
      */
     private static class MultiTypeGuardInlineInfo extends AbstractInlineInfo {
 
-        public final List<ResolvedJavaMethod> concretes;
-        public final ArrayList<ProfiledType> ptypes;
-        public final ArrayList<Integer> typesToConcretes;
-        public final double notRecordedTypeProbability;
+        private final List<ResolvedJavaMethod> concretes;
+        private final double[] methodProbabilities;
+        private final double maximumMethodProbability;
+        private final ArrayList<Integer> typesToConcretes;
+        private final ArrayList<ProfiledType> ptypes;
         private final ArrayList<Double> concretesProbabilities;
+        private final double notRecordedTypeProbability;
+        private final InlineableElement[] inlineableElements;
 
         public MultiTypeGuardInlineInfo(Invoke invoke, ArrayList<ResolvedJavaMethod> concretes, ArrayList<Double> concretesProbabilities, ArrayList<ProfiledType> ptypes,
                         ArrayList<Integer> typesToConcretes, double notRecordedTypeProbability) {
@@ -435,6 +503,28 @@
             this.ptypes = ptypes;
             this.typesToConcretes = typesToConcretes;
             this.notRecordedTypeProbability = notRecordedTypeProbability;
+            this.inlineableElements = new InlineableElement[concretes.size()];
+            this.methodProbabilities = computeMethodProbabilities();
+            this.maximumMethodProbability = maximumMethodProbability();
+            assert maximumMethodProbability > 0;
+        }
+
+        private double[] computeMethodProbabilities() {
+            double[] result = new double[concretes.size()];
+            for (int i = 0; i < typesToConcretes.size(); i++) {
+                int concrete = typesToConcretes.get(i);
+                double probability = ptypes.get(i).getProbability();
+                result[concrete] += probability;
+            }
+            return result;
+        }
+
+        private double maximumMethodProbability() {
+            double max = 0;
+            for (int i = 0; i < methodProbabilities.length; i++) {
+                max = Math.max(max, methodProbabilities[i]);
+            }
+            return max;
         }
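
A worked instance of the probability folding above (all numbers hypothetical): three profiled receiver types map onto two concrete methods, and relevanceAt(i) is probabilityAt(i) normalized by the maximum:

    public class MethodProbabilityExample {
        public static void main(String[] args) {
            // hypothetical profile: type probabilities 0.5, 0.3, 0.2;
            // types 0 and 2 resolve to concrete method 0, type 1 to concrete method 1
            double[] typeProbabilities = {0.5, 0.3, 0.2};
            int[] typesToConcretes = {0, 1, 0};

            double[] methodProbabilities = new double[2];
            for (int i = 0; i < typesToConcretes.length; i++) {
                methodProbabilities[typesToConcretes[i]] += typeProbabilities[i];
            }
            double max = 0;
            for (double p : methodProbabilities) {
                max = Math.max(max, p);
            }
            // methodProbabilities = [0.7, 0.3]
            System.out.println(methodProbabilities[0] / max); // 1.0
            System.out.println(methodProbabilities[1] / max); // ~0.43
        }
    }
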
 
         @Override
@@ -449,13 +539,35 @@
         }
 
         @Override
-        public void inline(StructuredGraph graph, MetaAccessProvider runtime, Replacements replacements, InliningCallback callback, Assumptions assumptions) {
+        public InlineableElement inlineableElementAt(int index) {
+            assert index >= 0 && index < concretes.size();
+            return inlineableElements[index];
+        }
+
+        @Override
+        public double probabilityAt(int index) {
+            return methodProbabilities[index];
+        }
+
+        @Override
+        public double relevanceAt(int index) {
+            return probabilityAt(index) / maximumMethodProbability;
+        }
+
+        @Override
+        public void setInlinableElement(int index, InlineableElement inlineableElement) {
+            assert index >= 0 && index < concretes.size();
+            inlineableElements[index] = inlineableElement;
+        }
+
+        @Override
+        public void inline(MetaAccessProvider runtime, Assumptions assumptions, Replacements replacements) {
             // receiver null check must be the first node
             InliningUtil.receiverNullCheck(invoke);
             if (hasSingleMethod()) {
-                inlineSingleMethod(graph, callback, replacements, assumptions, runtime);
+                inlineSingleMethod(graph(), runtime, assumptions);
             } else {
-                inlineMultipleMethods(graph, callback, replacements, assumptions, runtime);
+                inlineMultipleMethods(graph(), runtime, assumptions, replacements);
             }
         }
 
@@ -467,7 +579,7 @@
             return notRecordedTypeProbability > 0;
         }
 
-        private void inlineMultipleMethods(StructuredGraph graph, InliningCallback callback, Replacements replacements, Assumptions assumptions, MetaAccessProvider runtime) {
+        private void inlineMultipleMethods(StructuredGraph graph, MetaAccessProvider runtime, Assumptions assumptions, Replacements replacements) {
             int numberOfMethods = concretes.size();
             FixedNode continuation = invoke.next();
 
@@ -549,7 +661,7 @@
                 PiNode anchoredReceiver = createAnchoredReceiver(graph, node, commonType, receiver, exact);
                 invokeForInlining.callTarget().replaceFirstInput(receiver, anchoredReceiver);
 
-                inline(invokeForInlining, concretes.get(i), callback, replacements, assumptions, false);
+                inline(invokeForInlining, methodAt(i), inlineableElementAt(i), assumptions, false);
 
                 replacementNodes.add(anchoredReceiver);
             }
@@ -573,6 +685,7 @@
                     }
                     current = ((FixedWithNextNode) current).next();
                 } while (current instanceof FixedWithNextNode);
+
                 if (opportunities > 0) {
                     metricInliningTailDuplication.increment();
                     Debug.log("MultiTypeGuardInlineInfo starting tail duplication (%d opportunities)", opportunities);
@@ -614,8 +727,8 @@
             return result;
         }
 
-        private void inlineSingleMethod(StructuredGraph graph, InliningCallback callback, Replacements replacements, Assumptions assumptions, MetaAccessProvider runtime) {
-            assert concretes.size() == 1 && ptypes.size() > 1 && !shouldFallbackToInvoke() && notRecordedTypeProbability == 0;
+        private void inlineSingleMethod(StructuredGraph graph, MetaAccessProvider runtime, Assumptions assumptions) {
+            assert concretes.size() == 1 && inlineableElements.length == 1 && ptypes.size() > 1 && !shouldFallbackToInvoke() && notRecordedTypeProbability == 0;
 
             AbstractBeginNode calleeEntryNode = graph.add(new BeginNode());
 
@@ -625,8 +738,7 @@
 
             calleeEntryNode.setNext(invoke.asNode());
 
-            ResolvedJavaMethod concrete = concretes.get(0);
-            inline(invoke, concrete, callback, replacements, assumptions, false);
+            inline(invoke, methodAt(0), inlineableElementAt(0), assumptions, false);
         }
 
         private boolean createDispatchOnTypeBeforeInvoke(StructuredGraph graph, AbstractBeginNode[] successors, boolean invokeIsOnlySuccessor, MetaAccessProvider runtime) {
@@ -792,11 +904,11 @@
         }
 
         @Override
-        public void tryToDevirtualizeInvoke(StructuredGraph graph, MetaAccessProvider runtime, Assumptions assumptions) {
+        public void tryToDevirtualizeInvoke(MetaAccessProvider runtime, Assumptions assumptions) {
             if (hasSingleMethod()) {
-                tryToDevirtualizeSingleMethod(graph, runtime);
+                tryToDevirtualizeSingleMethod(graph(), runtime);
             } else {
-                tryToDevirtualizeMultipleMethods(graph, runtime);
+                tryToDevirtualizeMultipleMethods(graph(), runtime);
             }
         }
 
@@ -876,15 +988,15 @@
         }
 
         @Override
-        public void inline(StructuredGraph graph, MetaAccessProvider runtime, Replacements replacements, InliningCallback callback, Assumptions assumptions) {
+        public void inline(MetaAccessProvider runtime, Assumptions assumptions, Replacements replacements) {
             assumptions.record(takenAssumption);
-            super.inline(graph, runtime, replacements, callback, assumptions);
+            super.inline(runtime, assumptions, replacements);
         }
 
         @Override
-        public void tryToDevirtualizeInvoke(StructuredGraph graph, MetaAccessProvider runtime, Assumptions assumptions) {
+        public void tryToDevirtualizeInvoke(MetaAccessProvider runtime, Assumptions assumptions) {
             assumptions.record(takenAssumption);
-            replaceInvokeCallTarget(invoke, graph, InvokeKind.Special, concrete);
+            replaceInvokeCallTarget(invoke, graph(), InvokeKind.Special, concrete);
         }
 
         @Override
@@ -899,7 +1011,7 @@
      * @param invoke the invoke that should be inlined
      * @return an instance of InlineInfo, or null if no inlining is possible at the given invoke
      */
-    public static InlineInfo getInlineInfo(Invoke invoke, Replacements replacements, Assumptions assumptions, OptimisticOptimizations optimisticOpts) {
+    public static InlineInfo getInlineInfo(InliningData data, Invoke invoke, int maxNumberOfMethods, Replacements replacements, Assumptions assumptions, OptimisticOptimizations optimisticOpts) {
         if (!checkInvokeConditions(invoke)) {
             return null;
         }
@@ -907,7 +1019,7 @@
         ResolvedJavaMethod targetMethod = callTarget.targetMethod();
 
         if (callTarget.invokeKind() == InvokeKind.Special || targetMethod.canBeStaticallyBound()) {
-            return getExactInlineInfo(replacements, invoke, optimisticOpts, targetMethod);
+            return getExactInlineInfo(data, invoke, replacements, optimisticOpts, targetMethod);
         }
 
         assert callTarget.invokeKind() == InvokeKind.Virtual || callTarget.invokeKind() == InvokeKind.Interface;
@@ -922,50 +1034,52 @@
                 holder = receiverType;
                 if (receiverStamp.isExactType()) {
                     assert targetMethod.getDeclaringClass().isAssignableFrom(holder) : holder + " subtype of " + targetMethod.getDeclaringClass() + " for " + targetMethod;
-                    return getExactInlineInfo(replacements, invoke, optimisticOpts, holder.resolveMethod(targetMethod));
+                    return getExactInlineInfo(data, invoke, replacements, optimisticOpts, holder.resolveMethod(targetMethod));
                 }
             }
         }
 
         if (holder.isArray()) {
             // arrays can be treated as Objects
-            return getExactInlineInfo(replacements, invoke, optimisticOpts, holder.resolveMethod(targetMethod));
+            return getExactInlineInfo(data, invoke, replacements, optimisticOpts, holder.resolveMethod(targetMethod));
         }
 
         if (assumptions.useOptimisticAssumptions()) {
             ResolvedJavaType uniqueSubtype = holder.findUniqueConcreteSubtype();
             if (uniqueSubtype != null) {
-                return getAssumptionInlineInfo(replacements, invoke, optimisticOpts, uniqueSubtype.resolveMethod(targetMethod), new Assumptions.ConcreteSubtype(holder, uniqueSubtype));
+                return getAssumptionInlineInfo(data, invoke, replacements, optimisticOpts, uniqueSubtype.resolveMethod(targetMethod), new Assumptions.ConcreteSubtype(holder, uniqueSubtype));
             }
 
             ResolvedJavaMethod concrete = holder.findUniqueConcreteMethod(targetMethod);
             if (concrete != null) {
-                return getAssumptionInlineInfo(replacements, invoke, optimisticOpts, concrete, new Assumptions.ConcreteMethod(targetMethod, holder, concrete));
+                return getAssumptionInlineInfo(data, invoke, replacements, optimisticOpts, concrete, new Assumptions.ConcreteMethod(targetMethod, holder, concrete));
             }
         }
 
         // type check based inlining
-        return getTypeCheckedInlineInfo(replacements, invoke, targetMethod, optimisticOpts);
+        return getTypeCheckedInlineInfo(data, invoke, maxNumberOfMethods, replacements, targetMethod, optimisticOpts);
     }
 
-    private static InlineInfo getAssumptionInlineInfo(Replacements replacements, Invoke invoke, OptimisticOptimizations optimisticOpts, ResolvedJavaMethod concrete, Assumption takenAssumption) {
+    private static InlineInfo getAssumptionInlineInfo(InliningData data, Invoke invoke, Replacements replacements, OptimisticOptimizations optimisticOpts, ResolvedJavaMethod concrete,
+                    Assumption takenAssumption) {
         assert !Modifier.isAbstract(concrete.getModifiers());
-        if (!checkTargetConditions(replacements, invoke, concrete, optimisticOpts)) {
+        if (!checkTargetConditions(data, replacements, invoke, concrete, optimisticOpts)) {
             return null;
         }
         return new AssumptionInlineInfo(invoke, concrete, takenAssumption);
     }
 
-    private static InlineInfo getExactInlineInfo(Replacements replacements, Invoke invoke, OptimisticOptimizations optimisticOpts, ResolvedJavaMethod targetMethod) {
+    private static InlineInfo getExactInlineInfo(InliningData data, Invoke invoke, Replacements replacements, OptimisticOptimizations optimisticOpts, ResolvedJavaMethod targetMethod) {
         assert !Modifier.isAbstract(targetMethod.getModifiers());
-        if (!checkTargetConditions(replacements, invoke, targetMethod, optimisticOpts)) {
+        if (!checkTargetConditions(data, replacements, invoke, targetMethod, optimisticOpts)) {
             return null;
         }
         return new ExactInlineInfo(invoke, targetMethod);
     }
 
-    private static InlineInfo getTypeCheckedInlineInfo(Replacements replacements, Invoke invoke, ResolvedJavaMethod targetMethod, OptimisticOptimizations optimisticOpts) {
-        JavaTypeProfile typeProfile = null;
+    private static InlineInfo getTypeCheckedInlineInfo(InliningData data, Invoke invoke, int maxNumberOfMethods, Replacements replacements, ResolvedJavaMethod targetMethod,
+                    OptimisticOptimizations optimisticOpts) {
+        JavaTypeProfile typeProfile;
         ValueNode receiver = invoke.callTarget().arguments().get(0);
         if (receiver instanceof TypeProfileProxyNode) {
             TypeProfileProxyNode typeProfileProxyNode = (TypeProfileProxyNode) receiver;
@@ -987,7 +1101,7 @@
 
             ResolvedJavaType type = ptypes[0].getType();
             ResolvedJavaMethod concrete = type.resolveMethod(targetMethod);
-            if (!checkTargetConditions(replacements, invoke, concrete, optimisticOpts)) {
+            if (!checkTargetConditions(data, replacements, invoke, concrete, optimisticOpts)) {
                 return null;
             }
             return new TypeGuardInlineInfo(invoke, concrete, type);
@@ -1020,6 +1134,10 @@
                 }
             }
 
+            if (concreteMethods.size() > maxNumberOfMethods) {
+                return logNotInlinedMethodAndReturnNull(invoke, targetMethod, "polymorphic call with more than %d target methods", maxNumberOfMethods);
+            }
+
             // Clear methods that fall below the threshold.
             if (notRecordedTypeProbability > 0) {
                 ArrayList<ResolvedJavaMethod> newConcreteMethods = new ArrayList<>();
@@ -1060,7 +1178,7 @@
             }
 
             for (ResolvedJavaMethod concrete : concreteMethods) {
-                if (!checkTargetConditions(replacements, invoke, concrete, optimisticOpts)) {
+                if (!checkTargetConditions(data, replacements, invoke, concrete, optimisticOpts)) {
                     return logNotInlinedMethodAndReturnNull(invoke, targetMethod, "it is a polymorphic method call and at least one invoked method cannot be inlined");
                 }
             }
@@ -1076,25 +1194,25 @@
     // TODO (chaeubl): clean up this method
     private static boolean checkInvokeConditions(Invoke invoke) {
         if (invoke.predecessor() == null || !invoke.asNode().isAlive()) {
-            return logNotInlinedMethodAndReturnFalse(invoke, "the invoke is dead code");
+            return logNotInlinedMethod(invoke, "the invoke is dead code");
         } else if (!(invoke.callTarget() instanceof MethodCallTargetNode)) {
-            return logNotInlinedMethodAndReturnFalse(invoke, "the invoke has already been lowered, or has been created as a low-level node");
+            return logNotInlinedMethod(invoke, "the invoke has already been lowered, or has been created as a low-level node");
         } else if (((MethodCallTargetNode) invoke.callTarget()).targetMethod() == null) {
-            return logNotInlinedMethodAndReturnFalse(invoke, "target method is null");
+            return logNotInlinedMethod(invoke, "target method is null");
         } else if (invoke.stateAfter() == null) {
             // TODO (chaeubl): why should an invoke not have a state after?
-            return logNotInlinedMethodAndReturnFalse(invoke, "the invoke has no after state");
+            return logNotInlinedMethod(invoke, "the invoke has no after state");
         } else if (!invoke.useForInlining()) {
-            return logNotInlinedMethodAndReturnFalse(invoke, "the invoke is marked to be not used for inlining");
+            return logNotInlinedMethod(invoke, "the invoke is marked to be not used for inlining");
         } else if (((MethodCallTargetNode) invoke.callTarget()).receiver() != null && ((MethodCallTargetNode) invoke.callTarget()).receiver().isConstant() &&
                         ((MethodCallTargetNode) invoke.callTarget()).receiver().asConstant().isNull()) {
-            return logNotInlinedMethodAndReturnFalse(invoke, "receiver is null");
+            return logNotInlinedMethod(invoke, "receiver is null");
         } else {
             return true;
         }
     }
 
-    private static boolean checkTargetConditions(Replacements replacements, Invoke invoke, ResolvedJavaMethod method, OptimisticOptimizations optimisticOpts) {
+    private static boolean checkTargetConditions(InliningData data, Replacements replacements, Invoke invoke, ResolvedJavaMethod method, OptimisticOptimizations optimisticOpts) {
         if (method == null) {
             return logNotInlinedMethodAndReturnFalse(invoke, method, "the method is not resolved");
         } else if (Modifier.isNative(method.getModifiers()) && (!GraalOptions.Intrinsify || !InliningUtil.canIntrinsify(replacements, method))) {
@@ -1105,7 +1223,7 @@
             return logNotInlinedMethodAndReturnFalse(invoke, method, "the method's class is not initialized");
         } else if (!method.canBeInlined()) {
             return logNotInlinedMethodAndReturnFalse(invoke, method, "it is marked non-inlinable");
-        } else if (computeRecursiveInliningLevel(invoke.stateAfter(), method) > GraalOptions.MaximumRecursiveInlining) {
+        } else if (data.countRecursiveInlining(method) > GraalOptions.MaximumRecursiveInlining) {
             return logNotInlinedMethodAndReturnFalse(invoke, method, "it exceeds the maximum recursive inlining depth");
         } else if (new OptimisticOptimizations(method).lessOptimisticThan(optimisticOpts)) {
             return logNotInlinedMethodAndReturnFalse(invoke, method, "the callee uses less optimistic optimizations than caller");
@@ -1124,20 +1242,6 @@
         return count;
     }
 
-    private static int computeRecursiveInliningLevel(FrameState state, ResolvedJavaMethod method) {
-        assert state != null;
-
-        int count = 0;
-        FrameState curState = state;
-        while (curState != null) {
-            if (curState.method() == method) {
-                count++;
-            }
-            curState = curState.outerFrameState();
-        }
-        return count;
-    }
-
     static MonitorExitNode findPrecedingMonitorExit(UnwindNode unwind) {
         Node pred = unwind.predecessor();
         while (pred != null) {
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java	Wed May 15 14:53:34 2013 +0200
@@ -54,18 +54,13 @@
     public static int     MaximumRecursiveInlining           = 1;
     public static float   BoostInliningForEscapeAnalysis     = 2f;
     public static float   RelevanceCapForInlining            = 1f;
+    public static float   CapInheritedRelevance              = 1f;
     public static boolean IterativeInlining                  = ____;
 
-    public static int     TrivialBytecodeSize                = 10;
-    public static int     NormalBytecodeSize                 = 150;
-    public static int     MaximumBytecodeSize                = 500;
-    public static int     TrivialComplexity                  = 10;
-    public static int     NormalComplexity                   = 60;
-    public static int     MaximumComplexity                  = 400;
-    public static int     TrivialCompiledCodeSize            = 150;
-    public static int     NormalCompiledCodeSize             = 750;
-    public static int     MaximumCompiledCodeSize            = 4000;
-    public static int     SmallCompiledCodeSize              = 1000;
+    public static int     TrivialInliningSize                = 10;
+    public static int     MaximumInliningSize                = 300;
+    public static int     SmallCompiledLowLevelGraphSize     = 300;
+    public static double  LimitInlinedInvokes                = 10.0;
 
     // escape analysis settings
     public static boolean PartialEscapeAnalysis              = true;
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/graph/ComputeProbabilityClosure.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/graph/ComputeProbabilityClosure.java	Wed May 15 14:53:34 2013 +0200
@@ -24,7 +24,6 @@
 
 import java.util.*;
 
-import com.oracle.graal.debug.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.util.*;
@@ -65,9 +64,7 @@
 
     public NodesToDoubles apply() {
         new PropagateProbability(graph.start()).apply();
-        Debug.dump(graph, "After PropagateProbability");
         computeLoopFactors();
-        Debug.dump(graph, "After computeLoopFactors");
         new PropagateLoopFrequency(graph.start()).apply();
         return nodeProbabilities;
     }
--- a/graal/com.oracle.graal.sparc/src/com/oracle/graal/sparc/SPARC.java	Tue May 14 22:02:23 2013 +0200
+++ b/graal/com.oracle.graal.sparc/src/com/oracle/graal/sparc/SPARC.java	Wed May 15 14:53:34 2013 +0200
@@ -38,7 +38,7 @@
     // SPARC: Define registers.
 
     public SPARC() {
-        super("AMD64", 8, ByteOrder.LITTLE_ENDIAN, null, LOAD_STORE | STORE_STORE, 1, 0, 8);
+        super("SPARC", 8, ByteOrder.LITTLE_ENDIAN, null, LOAD_STORE | STORE_STORE, 1, 0, 8);
         // SPARC: Fix architecture parameters.
     }
 
--- a/src/os/windows/vm/os_windows.cpp	Tue May 14 22:02:23 2013 +0200
+++ b/src/os/windows/vm/os_windows.cpp	Wed May 15 14:53:34 2013 +0200
@@ -2172,16 +2172,17 @@
   #ifdef GRAAL
     PCONTEXT ctx = exceptionInfo->ContextRecord;
     address pc = (address)ctx->Rip;
-    assert(pc[0] == 0x48 && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
-    if (pc[0] == 0x48) {
+    assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
+    if (pc[0] == 0xF7) {
       // set correct result values and continue after idiv instruction
+      ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
+    } else {
       ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
-      ctx->Rax = (DWORD64)min_jlong;     // result
-    } else {
-      ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
-      ctx->Rax = (DWORD64)min_jlong;     // result
     }
-    ctx->Rdx = (DWORD64)0;             // remainder
+    // Do not set ctx->Rax as it already contains the correct value (either 32- or
+    // 64-bit, depending on the operation). This is the case because the exception
+    // only happens for MinValue/-1, and MinValue is always in rax because of the
+    // idiv opcode (0xF7).
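+    // For reference: 0xF7 /7 encodes "idiv r/m", which is 2 bytes in its reg,reg
+    // form (opcode + ModRM); a REX prefix (0x40-0x4F, e.g. REX.W for the 64-bit
+    // form) adds one byte, giving the 3-byte case handled above.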
+    ctx->Rdx = (DWORD64)0;               // remainder
     // Continue the execution
   #else
     PCONTEXT ctx = exceptionInfo->ContextRecord;
--- a/src/share/vm/code/nmethod.cpp	Tue May 14 22:02:23 2013 +0200
+++ b/src/share/vm/code/nmethod.cpp	Wed May 15 14:53:34 2013 +0200
@@ -1,517 +1,515 @@
-/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/codeCache.hpp"
-#include "code/compiledIC.hpp"
-#include "code/dependencies.hpp"
-#include "code/nmethod.hpp"
-#include "code/scopeDesc.hpp"
-#include "compiler/abstractCompiler.hpp"
-#include "compiler/compileBroker.hpp"
-#include "compiler/compileLog.hpp"
-#include "compiler/compilerOracle.hpp"
-#include "compiler/disassembler.hpp"
-#include "interpreter/bytecode.hpp"
-#include "oops/methodData.hpp"
-#include "prims/jvmtiRedefineClassesTrace.hpp"
-#include "prims/jvmtiImpl.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/sweeper.hpp"
-#include "utilities/dtrace.hpp"
-#include "utilities/events.hpp"
-#include "utilities/xmlstream.hpp"
-#ifdef SHARK
-#include "shark/sharkCompiler.hpp"
-#endif
-#ifdef GRAAL
-#include "graal/graalJavaAccess.hpp"
-#endif
-
-#ifdef DTRACE_ENABLED
-
-// Only bother with this argument setup if dtrace is available
-
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
-  const char*, int, const char*, int, const char*, int, void*, size_t);
-
-HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
-  char*, int, char*, int, char*, int);
-
-#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
-  {                                                                       \
-    Method* m = (method);                                                 \
-    if (m != NULL) {                                                      \
-      Symbol* klass_name = m->klass_name();                               \
-      Symbol* name = m->name();                                           \
-      Symbol* signature = m->signature();                                 \
-      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
-        klass_name->bytes(), klass_name->utf8_length(),                   \
-        name->bytes(), name->utf8_length(),                               \
-        signature->bytes(), signature->utf8_length());                    \
-    }                                                                     \
-  }
-#else /* USDT2 */
-#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
-  {                                                                       \
-    Method* m = (method);                                                 \
-    if (m != NULL) {                                                      \
-      Symbol* klass_name = m->klass_name();                               \
-      Symbol* name = m->name();                                           \
-      Symbol* signature = m->signature();                                 \
-      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
-        (char *) klass_name->bytes(), klass_name->utf8_length(),                   \
-        (char *) name->bytes(), name->utf8_length(),                               \
-        (char *) signature->bytes(), signature->utf8_length());                    \
-    }                                                                     \
-  }
-#endif /* USDT2 */
-
-#else //  ndef DTRACE_ENABLED
-
-#define DTRACE_METHOD_UNLOAD_PROBE(method)
-
-#endif
-
-bool nmethod::is_compiled_by_c1() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
-  return compiler()->is_c1();
-}
-bool nmethod::is_compiled_by_graal() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
-  return compiler()->is_graal();
-}
-bool nmethod::is_compiled_by_c2() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
-  return compiler()->is_c2();
-}
-bool nmethod::is_compiled_by_shark() const {
-  if (is_native_method()) return false;
-  assert(compiler() != NULL, "must be");
-  return compiler()->is_shark();
-}
-
-
-
-//---------------------------------------------------------------------------------
-// NMethod statistics
-// They are printed under various flags, including:
-//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
-// (In the latter two cases, they like other stats are printed to the log only.)
-
-#ifndef PRODUCT
-// These variables are put into one block to reduce relocations
-// and make it simpler to print from the debugger.
-static
-struct nmethod_stats_struct {
-  int nmethod_count;
-  int total_size;
-  int relocation_size;
-  int consts_size;
-  int insts_size;
-  int stub_size;
-  int scopes_data_size;
-  int scopes_pcs_size;
-  int dependencies_size;
-  int handler_table_size;
-  int nul_chk_table_size;
-  int oops_size;
-
-  void note_nmethod(nmethod* nm) {
-    nmethod_count += 1;
-    total_size          += nm->size();
-    relocation_size     += nm->relocation_size();
-    consts_size         += nm->consts_size();
-    insts_size          += nm->insts_size();
-    stub_size           += nm->stub_size();
-    oops_size           += nm->oops_size();
-    scopes_data_size    += nm->scopes_data_size();
-    scopes_pcs_size     += nm->scopes_pcs_size();
-    dependencies_size   += nm->dependencies_size();
-    handler_table_size  += nm->handler_table_size();
-    nul_chk_table_size  += nm->nul_chk_table_size();
-  }
-  void print_nmethod_stats() {
-    if (nmethod_count == 0)  return;
-    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
-    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
-    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
-    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
-    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
-    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
-    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
-    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
-    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
-    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
-    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
-    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
-  }
-
-  int native_nmethod_count;
-  int native_total_size;
-  int native_relocation_size;
-  int native_insts_size;
-  int native_oops_size;
-  void note_native_nmethod(nmethod* nm) {
-    native_nmethod_count += 1;
-    native_total_size       += nm->size();
-    native_relocation_size  += nm->relocation_size();
-    native_insts_size       += nm->insts_size();
-    native_oops_size        += nm->oops_size();
-  }
-  void print_native_nmethod_stats() {
-    if (native_nmethod_count == 0)  return;
-    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
-    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
-    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
-    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
-    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
-  }
-
-  int pc_desc_resets;   // number of resets (= number of caches)
-  int pc_desc_queries;  // queries to nmethod::find_pc_desc
-  int pc_desc_approx;   // number of those which have approximate true
-  int pc_desc_repeats;  // number of _pc_descs[0] hits
-  int pc_desc_hits;     // number of LRU cache hits
-  int pc_desc_tests;    // total number of PcDesc examinations
-  int pc_desc_searches; // total number of quasi-binary search steps
-  int pc_desc_adds;     // number of LUR cache insertions
-
-  void print_pc_stats() {
-    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
-                  pc_desc_queries,
-                  (double)(pc_desc_tests + pc_desc_searches)
-                  / pc_desc_queries);
-    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
-                  pc_desc_resets,
-                  pc_desc_queries, pc_desc_approx,
-                  pc_desc_repeats, pc_desc_hits,
-                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
-  }
-} nmethod_stats;
-#endif //PRODUCT
-
-
-//---------------------------------------------------------------------------------
-
-
-ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
-  assert(pc != NULL, "Must be non null");
-  assert(exception.not_null(), "Must be non null");
-  assert(handler != NULL, "Must be non null");
-
-  _count = 0;
-  _exception_type = exception->klass();
-  _next = NULL;
-
-  add_address_and_handler(pc,handler);
-}
-
-
-address ExceptionCache::match(Handle exception, address pc) {
-  assert(pc != NULL,"Must be non null");
-  assert(exception.not_null(),"Must be non null");
-  if (exception->klass() == exception_type()) {
-    return (test_address(pc));
-  }
-
-  return NULL;
-}
-
-
-bool ExceptionCache::match_exception_with_space(Handle exception) {
-  assert(exception.not_null(),"Must be non null");
-  if (exception->klass() == exception_type() && count() < cache_size) {
-    return true;
-  }
-  return false;
-}
-
-
-address ExceptionCache::test_address(address addr) {
-  for (int i=0; i<count(); i++) {
-    if (pc_at(i) == addr) {
-      return handler_at(i);
-    }
-  }
-  return NULL;
-}
-
-
-bool ExceptionCache::add_address_and_handler(address addr, address handler) {
-  if (test_address(addr) == handler) return true;
-  if (count() < cache_size) {
-    set_pc_at(count(),addr);
-    set_handler_at(count(), handler);
-    increment_count();
-    return true;
-  }
-  return false;
-}
-
-
-// private method for handling exception cache
-// These methods are private, and used to manipulate the exception cache
-// directly.
-ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
-  ExceptionCache* ec = exception_cache();
-  while (ec != NULL) {
-    if (ec->match_exception_with_space(exception)) {
-      return ec;
-    }
-    ec = ec->next();
-  }
-  return NULL;
-}
-
-
-//-----------------------------------------------------------------------------
-
-
-// Helper used by both find_pc_desc methods.
-static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
-  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
-  if (!approximate)
-    return pc->pc_offset() == pc_offset;
-  else
-    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
-}
-
-void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
-  if (initial_pc_desc == NULL) {
-    _pc_descs[0] = NULL; // native method; no PcDescs at all
-    return;
-  }
-  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
-  // reset the cache by filling it with benign (non-null) values
-  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
-  for (int i = 0; i < cache_size; i++)
-    _pc_descs[i] = initial_pc_desc;
-}
-
-PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
-  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
-  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
-
-  // Note: one might think that caching the most recently
-  // read value separately would be a win, but one would be
-  // wrong.  When many threads are updating it, the cache
-  // line it's in would bounce between caches, negating
-  // any benefit.
-
-  // In order to prevent race conditions do not load cache elements
-  // repeatedly, but use a local copy:
-  PcDesc* res;
-
-  // Step one:  Check the most recently added value.
-  res = _pc_descs[0];
-  if (res == NULL) return NULL;  // native method; no PcDescs at all
-  if (match_desc(res, pc_offset, approximate)) {
-    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
-    return res;
-  }
-
-  // Step two:  Check the rest of the LRU cache.
-  for (int i = 1; i < cache_size; ++i) {
-    res = _pc_descs[i];
-    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
-    if (match_desc(res, pc_offset, approximate)) {
-      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
-      return res;
-    }
-  }
-
-  // Report failure.
-  return NULL;
-}
-
-void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
-  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
-  // Update the LRU cache by shifting pc_desc forward.
-  for (int i = 0; i < cache_size; i++)  {
-    PcDesc* next = _pc_descs[i];
-    _pc_descs[i] = pc_desc;
-    pc_desc = next;
-  }
-}
-
-// adjust pcs_size so that it is a multiple of both oopSize and
-// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
-// of oopSize, then 2*sizeof(PcDesc) is)
-static int adjust_pcs_size(int pcs_size) {
-  int nsize = round_to(pcs_size,   oopSize);
-  if ((nsize % sizeof(PcDesc)) != 0) {
-    nsize = pcs_size + sizeof(PcDesc);
-  }
-  assert((nsize % oopSize) == 0, "correct alignment");
-  return nsize;
-}
-
-//-----------------------------------------------------------------------------
-
-
-void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
-  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
-  assert(new_entry != NULL,"Must be non null");
-  assert(new_entry->next() == NULL, "Must be null");
-
-  if (exception_cache() != NULL) {
-    new_entry->set_next(exception_cache());
-  }
-  set_exception_cache(new_entry);
-}
-
-void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
-  ExceptionCache* prev = NULL;
-  ExceptionCache* curr = exception_cache();
-  assert(curr != NULL, "nothing to remove");
-  // find the previous and next entry of ec
-  while (curr != ec) {
-    prev = curr;
-    curr = curr->next();
-    assert(curr != NULL, "ExceptionCache not found");
-  }
-  // now: curr == ec
-  ExceptionCache* next = curr->next();
-  if (prev == NULL) {
-    set_exception_cache(next);
-  } else {
-    prev->set_next(next);
-  }
-  delete curr;
-}
-
-
-// public method for accessing the exception cache
-// These are the public access methods.
-address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
-  // We never grab a lock to read the exception cache, so we may
-  // have false negatives. This is okay, as it can only happen during
-  // the first few exception lookups for a given nmethod.
-  ExceptionCache* ec = exception_cache();
-  while (ec != NULL) {
-    address ret_val;
-    if ((ret_val = ec->match(exception,pc)) != NULL) {
-      return ret_val;
-    }
-    ec = ec->next();
-  }
-  return NULL;
-}
-
-
-void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
-  // There are potential race conditions during exception cache updates, so we
-  // must own the ExceptionCache_lock before doing ANY modifications. Because
-  // we don't lock during reads, it is possible to have several threads attempt
-  // to update the cache with the same data. We need to check for already inserted
-  // copies of the current data before adding it.
-
-  MutexLocker ml(ExceptionCache_lock);
-  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
-
-  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
-    target_entry = new ExceptionCache(exception,pc,handler);
-    add_exception_cache_entry(target_entry);
-  }
-}
-
-
-//-------------end of code for ExceptionCache--------------
-
-
-int nmethod::total_size() const {
-  return
-    consts_size()        +
-    insts_size()         +
-    stub_size()          +
-    scopes_data_size()   +
-    scopes_pcs_size()    +
-    handler_table_size() +
-    nul_chk_table_size();
-}
-
-const char* nmethod::compile_kind() const {
-  if (is_osr_method())     return "osr";
-  if (method() != NULL && is_native_method())  return "c2n";
-  return NULL;
-}
-
-// Fill in default values for various flag fields
-void nmethod::init_defaults() {
-  _state                      = alive;
-  _marked_for_reclamation     = 0;
-  _has_flushed_dependencies   = 0;
-  _speculatively_disconnected = 0;
-  _has_unsafe_access          = 0;
-  _has_method_handle_invokes  = 0;
-  _lazy_critical_native       = 0;
-  _has_wide_vectors           = 0;
-  _marked_for_deoptimization  = 0;
-  _lock_count                 = 0;
-  _stack_traversal_mark       = 0;
-  _unload_reported            = false;           // jvmti state
-
-#ifdef ASSERT
-  _oops_are_stale             = false;
-#endif
-
-  _oops_do_mark_link       = NULL;
-  _jmethod_id              = NULL;
-  _osr_link                = NULL;
-  _scavenge_root_link      = NULL;
-  _scavenge_root_state     = 0;
-  _saved_nmethod_link      = NULL;
-  _compiler                = NULL;
-#ifdef GRAAL
-  _graal_installed_code   = NULL;
-  _triggered_deoptimizations = NULL;
-#endif
-#ifdef HAVE_DTRACE_H
-  _trap_offset             = 0;
-#endif // def HAVE_DTRACE_H
-}
-
-nmethod* nmethod::new_native_nmethod(methodHandle method,
-  int compile_id,
-  CodeBuffer *code_buffer,
-  int vep_offset,
-  int frame_complete,
-  int frame_size,
-  ByteSize basic_lock_owner_sp_offset,
-  ByteSize basic_lock_sp_offset,
-  OopMapSet* oop_maps) {
-  code_buffer->finalize_oop_references(method);
-  // create nmethod
-  nmethod* nm = NULL;
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeCache.hpp"
+#include "code/compiledIC.hpp"
+#include "code/dependencies.hpp"
+#include "code/nmethod.hpp"
+#include "code/scopeDesc.hpp"
+#include "compiler/abstractCompiler.hpp"
+#include "compiler/compileBroker.hpp"
+#include "compiler/compileLog.hpp"
+#include "compiler/compilerOracle.hpp"
+#include "compiler/disassembler.hpp"
+#include "interpreter/bytecode.hpp"
+#include "oops/methodData.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
+#include "prims/jvmtiImpl.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/sweeper.hpp"
+#include "utilities/dtrace.hpp"
+#include "utilities/events.hpp"
+#include "utilities/xmlstream.hpp"
+#ifdef SHARK
+#include "shark/sharkCompiler.hpp"
+#endif
+#ifdef GRAAL
+#include "graal/graalJavaAccess.hpp"
+#endif
+
+#ifdef DTRACE_ENABLED
+
+// Only bother with this argument setup if dtrace is available
+
+#ifndef USDT2
+HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
+  const char*, int, const char*, int, const char*, int, void*, size_t);
+
+HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
+  char*, int, char*, int, char*, int);
+
+#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
+  {                                                                       \
+    Method* m = (method);                                                 \
+    if (m != NULL) {                                                      \
+      Symbol* klass_name = m->klass_name();                               \
+      Symbol* name = m->name();                                           \
+      Symbol* signature = m->signature();                                 \
+      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
+        klass_name->bytes(), klass_name->utf8_length(),                   \
+        name->bytes(), name->utf8_length(),                               \
+        signature->bytes(), signature->utf8_length());                    \
+    }                                                                     \
+  }
+#else /* USDT2 */
+#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
+  {                                                                       \
+    Method* m = (method);                                                 \
+    if (m != NULL) {                                                      \
+      Symbol* klass_name = m->klass_name();                               \
+      Symbol* name = m->name();                                           \
+      Symbol* signature = m->signature();                                 \
+      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
+        (char *) klass_name->bytes(), klass_name->utf8_length(),                   \
+        (char *) name->bytes(), name->utf8_length(),                               \
+        (char *) signature->bytes(), signature->utf8_length());                    \
+    }                                                                     \
+  }
+#endif /* USDT2 */
+
+#else //  ndef DTRACE_ENABLED
+
+#define DTRACE_METHOD_UNLOAD_PROBE(method)
+
+#endif
+
+bool nmethod::is_compiled_by_c1() const {
+  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
+  if (is_native_method()) return false;
+  return compiler()->is_c1();
+}
+bool nmethod::is_compiled_by_graal() const {
+  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
+  if (is_native_method()) return false;
+  return compiler()->is_graal();
+}
+bool nmethod::is_compiled_by_c2() const {
+  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
+  if (is_native_method()) return false;
+  return compiler()->is_c2();
+}
+bool nmethod::is_compiled_by_shark() const {
+  if (is_native_method()) return false;
+  assert(compiler() != NULL, "must be");
+  return compiler()->is_shark();
+}
+
+
+
+//---------------------------------------------------------------------------------
+// NMethod statistics
+// They are printed under various flags, including:
+//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
+// (In the latter two cases, they, like other stats, are printed to the log only.)
+
+// These variables are put into one block to reduce relocations
+// and make it simpler to print from the debugger.
+static
+struct nmethod_stats_struct {
+  int nmethod_count;
+  int total_size;
+  int relocation_size;
+  int consts_size;
+  int insts_size;
+  int stub_size;
+  int scopes_data_size;
+  int scopes_pcs_size;
+  int dependencies_size;
+  int handler_table_size;
+  int nul_chk_table_size;
+  int oops_size;
+
+  void note_nmethod(nmethod* nm) {
+    nmethod_count += 1;
+    total_size          += nm->size();
+    relocation_size     += nm->relocation_size();
+    consts_size         += nm->consts_size();
+    insts_size          += nm->insts_size();
+    stub_size           += nm->stub_size();
+    oops_size           += nm->oops_size();
+    scopes_data_size    += nm->scopes_data_size();
+    scopes_pcs_size     += nm->scopes_pcs_size();
+    dependencies_size   += nm->dependencies_size();
+    handler_table_size  += nm->handler_table_size();
+    nul_chk_table_size  += nm->nul_chk_table_size();
+  }
+  void print_nmethod_stats() {
+    if (nmethod_count == 0)  return;
+    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
+    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
+    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
+    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
+    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
+    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
+    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
+    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
+    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
+    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
+    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
+    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
+  }
+
+  int native_nmethod_count;
+  int native_total_size;
+  int native_relocation_size;
+  int native_insts_size;
+  int native_oops_size;
+  void note_native_nmethod(nmethod* nm) {
+    native_nmethod_count += 1;
+    native_total_size       += nm->size();
+    native_relocation_size  += nm->relocation_size();
+    native_insts_size       += nm->insts_size();
+    native_oops_size        += nm->oops_size();
+  }
+  void print_native_nmethod_stats() {
+    if (native_nmethod_count == 0)  return;
+    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
+    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
+    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
+    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
+    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
+  }
+
+  int pc_desc_resets;   // number of resets (= number of caches)
+  int pc_desc_queries;  // queries to nmethod::find_pc_desc
+  int pc_desc_approx;   // number of queries with approximate == true
+  int pc_desc_repeats;  // number of _pc_descs[0] hits
+  int pc_desc_hits;     // number of LRU cache hits
+  int pc_desc_tests;    // total number of PcDesc examinations
+  int pc_desc_searches; // total number of quasi-binary search steps
+  int pc_desc_adds;     // number of LRU cache insertions
+
+  void print_pc_stats() {
+    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
+                  pc_desc_queries,
+                  (double)(pc_desc_tests + pc_desc_searches)
+                  / pc_desc_queries);
+    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
+                  pc_desc_resets,
+                  pc_desc_queries, pc_desc_approx,
+                  pc_desc_repeats, pc_desc_hits,
+                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
+  }
+} nmethod_stats;
+
+
+//---------------------------------------------------------------------------------
+
+
+ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
+  assert(pc != NULL, "Must be non null");
+  assert(exception.not_null(), "Must be non null");
+  assert(handler != NULL, "Must be non null");
+
+  _count = 0;
+  _exception_type = exception->klass();
+  _next = NULL;
+
+  add_address_and_handler(pc,handler);
+}
+
+
+address ExceptionCache::match(Handle exception, address pc) {
+  assert(pc != NULL,"Must be non null");
+  assert(exception.not_null(),"Must be non null");
+  if (exception->klass() == exception_type()) {
+    return (test_address(pc));
+  }
+
+  return NULL;
+}
+
+
+bool ExceptionCache::match_exception_with_space(Handle exception) {
+  assert(exception.not_null(),"Must be non null");
+  if (exception->klass() == exception_type() && count() < cache_size) {
+    return true;
+  }
+  return false;
+}
+
+
+address ExceptionCache::test_address(address addr) {
+  for (int i=0; i<count(); i++) {
+    if (pc_at(i) == addr) {
+      return handler_at(i);
+    }
+  }
+  return NULL;
+}
+
+
+bool ExceptionCache::add_address_and_handler(address addr, address handler) {
+  if (test_address(addr) == handler) return true;
+  if (count() < cache_size) {
+    set_pc_at(count(),addr);
+    set_handler_at(count(), handler);
+    increment_count();
+    return true;
+  }
+  return false;
+}
+
+
+// Private method for handling the exception cache:
+// it is used to manipulate the exception cache directly.
+ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
+  ExceptionCache* ec = exception_cache();
+  while (ec != NULL) {
+    if (ec->match_exception_with_space(exception)) {
+      return ec;
+    }
+    ec = ec->next();
+  }
+  return NULL;
+}
+
+
+//-----------------------------------------------------------------------------
+
+
+// Helper used by both find_pc_desc methods.
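+// In approximate mode, a PcDesc matches if pc_offset falls in the half-open
+// interval (previous pc_offset, this pc_offset], i.e. it is the first descriptor
+// at or after the requested offset.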
+static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
+  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
+  if (!approximate)
+    return pc->pc_offset() == pc_offset;
+  else
+    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
+}
+
+void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
+  if (initial_pc_desc == NULL) {
+    _pc_descs[0] = NULL; // native method; no PcDescs at all
+    return;
+  }
+  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
+  // reset the cache by filling it with benign (non-null) values
+  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
+  for (int i = 0; i < cache_size; i++)
+    _pc_descs[i] = initial_pc_desc;
+}
+
+PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
+  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
+  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
+
+  // Note: one might think that caching the most recently
+  // read value separately would be a win, but one would be
+  // wrong.  When many threads are updating it, the cache
+  // line it's in would bounce between caches, negating
+  // any benefit.
+
+  // In order to prevent race conditions, do not load cache elements
+  // repeatedly, but use a local copy:
+  PcDesc* res;
+
+  // Step one:  Check the most recently added value.
+  res = _pc_descs[0];
+  if (res == NULL) return NULL;  // native method; no PcDescs at all
+  if (match_desc(res, pc_offset, approximate)) {
+    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
+    return res;
+  }
+
+  // Step two:  Check the rest of the LRU cache.
+  for (int i = 1; i < cache_size; ++i) {
+    res = _pc_descs[i];
+    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
+    if (match_desc(res, pc_offset, approximate)) {
+      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
+      return res;
+    }
+  }
+
+  // Report failure.
+  return NULL;
+}
+
+void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
+  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
+  // Update the LRU cache by shifting pc_desc forward.
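+  // For example, a cache holding [A, B, C, D] becomes [pc_desc, A, B, C]: each
+  // slot moves down one position and the oldest entry falls off the end.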
+  for (int i = 0; i < cache_size; i++)  {
+    PcDesc* next = _pc_descs[i];
+    _pc_descs[i] = pc_desc;
+    pc_desc = next;
+  }
+}
+
+// adjust pcs_size so that it is a multiple of both oopSize and
+// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
+// of oopSize, then 2*sizeof(PcDesc) is)
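+// Illustrative values only: with oopSize == 8 and sizeof(PcDesc) == 12, a
+// pcs_size of 36 rounds up to 40, which is not a multiple of 12, so nsize
+// becomes 36 + 12 == 48, a multiple of both sizes.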
+static int adjust_pcs_size(int pcs_size) {
+  int nsize = round_to(pcs_size,   oopSize);
+  if ((nsize % sizeof(PcDesc)) != 0) {
+    nsize = pcs_size + sizeof(PcDesc);
+  }
+  assert((nsize % oopSize) == 0, "correct alignment");
+  return nsize;
+}
+
+//-----------------------------------------------------------------------------
+
+
+void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
+  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
+  assert(new_entry != NULL,"Must be non null");
+  assert(new_entry->next() == NULL, "Must be null");
+
+  if (exception_cache() != NULL) {
+    new_entry->set_next(exception_cache());
+  }
+  set_exception_cache(new_entry);
+}
+
+void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
+  ExceptionCache* prev = NULL;
+  ExceptionCache* curr = exception_cache();
+  assert(curr != NULL, "nothing to remove");
+  // find the previous and next entry of ec
+  while (curr != ec) {
+    prev = curr;
+    curr = curr->next();
+    assert(curr != NULL, "ExceptionCache not found");
+  }
+  // now: curr == ec
+  ExceptionCache* next = curr->next();
+  if (prev == NULL) {
+    set_exception_cache(next);
+  } else {
+    prev->set_next(next);
+  }
+  delete curr;
+}
+
+
+// Public methods for accessing the exception cache.
+address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
+  // We never grab a lock to read the exception cache, so we may
+  // have false negatives. This is okay, as it can only happen during
+  // the first few exception lookups for a given nmethod.
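+  // (A lookup that misses a concurrently added entry simply falls back to the
+  // slow path; add_handler_for_exception_and_pc below de-duplicates on insert.)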
+  ExceptionCache* ec = exception_cache();
+  while (ec != NULL) {
+    address ret_val;
+    if ((ret_val = ec->match(exception,pc)) != NULL) {
+      return ret_val;
+    }
+    ec = ec->next();
+  }
+  return NULL;
+}
+
+
+void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
+  // There are potential race conditions during exception cache updates, so we
+  // must own the ExceptionCache_lock before doing ANY modifications. Because
+  // we don't lock during reads, it is possible to have several threads attempt
+  // to update the cache with the same data. We need to check for already inserted
+  // copies of the current data before adding it.
+
+  MutexLocker ml(ExceptionCache_lock);
+  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
+
+  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
+    target_entry = new ExceptionCache(exception,pc,handler);
+    add_exception_cache_entry(target_entry);
+  }
+}
+
+
+//-------------end of code for ExceptionCache--------------
+
+
+int nmethod::total_size() const {
+  return
+    consts_size()        +
+    insts_size()         +
+    stub_size()          +
+    scopes_data_size()   +
+    scopes_pcs_size()    +
+    handler_table_size() +
+    nul_chk_table_size();
+}
+
+const char* nmethod::compile_kind() const {
+  if (is_osr_method())     return "osr";
+  if (method() != NULL && is_native_method())  return "c2n";
+  return NULL;
+}
+
+// Fill in default values for various flag fields
+void nmethod::init_defaults() {
+  _state                      = alive;
+  _marked_for_reclamation     = 0;
+  _has_flushed_dependencies   = 0;
+  _speculatively_disconnected = 0;
+  _has_unsafe_access          = 0;
+  _has_method_handle_invokes  = 0;
+  _lazy_critical_native       = 0;
+  _has_wide_vectors           = 0;
+  _marked_for_deoptimization  = 0;
+  _lock_count                 = 0;
+  _stack_traversal_mark       = 0;
+  _unload_reported            = false;           // jvmti state
+
+#ifdef ASSERT
+  _oops_are_stale             = false;
+#endif
+
+  _oops_do_mark_link       = NULL;
+  _jmethod_id              = NULL;
+  _osr_link                = NULL;
+  _scavenge_root_link      = NULL;
+  _scavenge_root_state     = 0;
+  _saved_nmethod_link      = NULL;
+  _compiler                = NULL;
+#ifdef GRAAL
+  _graal_installed_code   = NULL;
+  _triggered_deoptimizations = NULL;
+#endif
+#ifdef HAVE_DTRACE_H
+  _trap_offset             = 0;
+#endif // def HAVE_DTRACE_H
+}
+
+nmethod* nmethod::new_native_nmethod(methodHandle method,
+  int compile_id,
+  CodeBuffer *code_buffer,
+  int vep_offset,
+  int frame_complete,
+  int frame_size,
+  ByteSize basic_lock_owner_sp_offset,
+  ByteSize basic_lock_sp_offset,
+  OopMapSet* oop_maps) {
+  code_buffer->finalize_oop_references(method);
+  // create nmethod
+  nmethod* nm = NULL;
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
     if (CodeCache::has_space(native_nmethod_size)) {
       CodeOffsets offsets;
       offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
@@ -521,2512 +519,2512 @@
                                              code_buffer, frame_size,
                                              basic_lock_owner_sp_offset,
                                              basic_lock_sp_offset, oop_maps);
-      NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
+      if (nm != NULL)  nmethod_stats.note_native_nmethod(nm);
       if (PrintAssembly && nm != NULL)
         Disassembler::decode(nm);
     }
-  }
-  // verify nmethod
-  debug_only(if (nm) nm->verify();) // might block
-
-  if (nm != NULL) {
-    nm->log_new_nmethod();
-  }
-
-  return nm;
-}
-
-#ifdef HAVE_DTRACE_H
-nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
-                                     CodeBuffer *code_buffer,
-                                     int vep_offset,
-                                     int trap_offset,
-                                     int frame_complete,
-                                     int frame_size) {
-  code_buffer->finalize_oop_references(method);
-  // create nmethod
-  nmethod* nm = NULL;
-  {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
+  }
+  // verify nmethod
+  debug_only(if (nm) nm->verify();) // might block
+
+  if (nm != NULL) {
+    nm->log_new_nmethod();
+  }
+
+  return nm;
+}
+
+#ifdef HAVE_DTRACE_H
+nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
+                                     CodeBuffer *code_buffer,
+                                     int vep_offset,
+                                     int trap_offset,
+                                     int frame_complete,
+                                     int frame_size) {
+  code_buffer->finalize_oop_references(method);
+  // create nmethod
+  nmethod* nm = NULL;
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
     if (CodeCache::has_space(nmethod_size)) {
       CodeOffsets offsets;
       offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
       offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
       offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-
+
       nm = new (nmethod_size) nmethod(method(), nmethod_size,
                                       &offsets, code_buffer, frame_size);
-
-      NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
+
+      if (nm != NULL)  nmethod_stats.note_nmethod(nm);
       if (PrintAssembly && nm != NULL)
         Disassembler::decode(nm);
     }
-  }
-  // verify nmethod
-  debug_only(if (nm) nm->verify();) // might block
-
-  if (nm != NULL) {
-    nm->log_new_nmethod();
-  }
-
-  return nm;
-}
-
-#endif // def HAVE_DTRACE_H
-
-nmethod* nmethod::new_nmethod(methodHandle method,
-  int compile_id,
-  int entry_bci,
-  CodeOffsets* offsets,
-  int orig_pc_offset,
-  DebugInformationRecorder* debug_info,
-  Dependencies* dependencies,
-  CodeBuffer* code_buffer, int frame_size,
-  OopMapSet* oop_maps,
-  ExceptionHandlerTable* handler_table,
-  ImplicitExceptionTable* nul_chk_table,
-  AbstractCompiler* compiler,
-  int comp_level,
-  GrowableArray<jlong>* leaf_graph_ids
-#ifdef GRAAL
-  , Handle installed_code,
-  Handle triggered_deoptimizations
-#endif
-)
-{
-  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
-  code_buffer->finalize_oop_references(method);
-  int leaf_graph_ids_size = leaf_graph_ids == NULL ? 0 : round_to(sizeof(jlong) * leaf_graph_ids->length(), oopSize);
-  // create nmethod
-  nmethod* nm = NULL;
-  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    int nmethod_size =
-      allocation_size(code_buffer, sizeof(nmethod))
-      + adjust_pcs_size(debug_info->pcs_size())
-      + round_to(dependencies->size_in_bytes() , oopSize)
-      + round_to(handler_table->size_in_bytes(), oopSize)
-      + round_to(nul_chk_table->size_in_bytes(), oopSize)
-      + round_to(debug_info->data_size()       , oopSize)
+  }
+  // verify nmethod
+  debug_only(if (nm) nm->verify();) // might block
+
+  if (nm != NULL) {
+    nm->log_new_nmethod();
+  }
+
+  return nm;
+}
+
+#endif // def HAVE_DTRACE_H
+
+nmethod* nmethod::new_nmethod(methodHandle method,
+  int compile_id,
+  int entry_bci,
+  CodeOffsets* offsets,
+  int orig_pc_offset,
+  DebugInformationRecorder* debug_info,
+  Dependencies* dependencies,
+  CodeBuffer* code_buffer, int frame_size,
+  OopMapSet* oop_maps,
+  ExceptionHandlerTable* handler_table,
+  ImplicitExceptionTable* nul_chk_table,
+  AbstractCompiler* compiler,
+  int comp_level,
+  GrowableArray<jlong>* leaf_graph_ids
+#ifdef GRAAL
+  , Handle installed_code,
+  Handle triggered_deoptimizations
+#endif
+)
+{
+  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
+  code_buffer->finalize_oop_references(method);
+  int leaf_graph_ids_size = leaf_graph_ids == NULL ? 0 : round_to(sizeof(jlong) * leaf_graph_ids->length(), oopSize);
+  // create nmethod
+  nmethod* nm = NULL;
+  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    int nmethod_size =
+      allocation_size(code_buffer, sizeof(nmethod))
+      + adjust_pcs_size(debug_info->pcs_size())
+      + round_to(dependencies->size_in_bytes() , oopSize)
+      + round_to(handler_table->size_in_bytes(), oopSize)
+      + round_to(nul_chk_table->size_in_bytes(), oopSize)
+      + round_to(debug_info->data_size()       , oopSize)
       + leaf_graph_ids_size;
-    if (CodeCache::has_space(nmethod_size)) {
-      nm = new (nmethod_size)
-      nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
-              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
-              oop_maps,
-              handler_table,
-              nul_chk_table,
-              compiler,
-              comp_level,
-              leaf_graph_ids
-#ifdef GRAAL
-              , installed_code,
-              triggered_deoptimizations
-#endif
+    if (CodeCache::has_space(nmethod_size)) {
+      nm = new (nmethod_size)
+      nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
+              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
+              oop_maps,
+              handler_table,
+              nul_chk_table,
+              compiler,
+              comp_level,
+              leaf_graph_ids
+#ifdef GRAAL
+              , installed_code,
+              triggered_deoptimizations
+#endif
               );
-    }
-    if (nm != NULL) {
-      // To make dependency checking during class loading fast, record
-      // the nmethod dependencies in the classes it is dependent on.
-      // This allows the dependency checking code to simply walk the
-      // class hierarchy above the loaded class, checking only nmethods
-      // which are dependent on those classes.  The slow way is to
-      // check every nmethod for dependencies which makes it linear in
-      // the number of methods compiled.  For applications with a lot
-      // classes the slow way is too slow.
-      for (Dependencies::DepStream deps(nm); deps.next(); ) {
-        Klass* klass = deps.context_type();
-        if (klass == NULL)  continue;  // ignore things like evol_method
-
-        // record this nmethod as dependent on this klass
-        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
-      }
-    }
-    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
-    if (PrintAssembly && nm != NULL)
-      Disassembler::decode(nm);
-  }
-
-  // verify nmethod
-  debug_only(if (nm) nm->verify();) // might block
-
-  if (nm != NULL) {
-    nm->log_new_nmethod();
-  }
-
-  // done
-  return nm;
-}
-
-
-// For native wrappers
-nmethod::nmethod(
-  Method* method,
-  int nmethod_size,
-  int compile_id,
-  CodeOffsets* offsets,
-  CodeBuffer* code_buffer,
-  int frame_size,
-  ByteSize basic_lock_owner_sp_offset,
-  ByteSize basic_lock_sp_offset,
-  OopMapSet* oop_maps )
-  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
-  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
-  _native_basic_lock_sp_offset(basic_lock_sp_offset)
-{
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-    assert_locked_or_safepoint(CodeCache_lock);
-
-    init_defaults();
-    _method                  = method;
-    _entry_bci               = InvocationEntryBci;
-    // We have no exception handler or deopt handler make the
-    // values something that will never match a pc like the nmethod vtable entry
-    _exception_offset        = 0;
-    _deoptimize_offset       = 0;
-    _deoptimize_mh_offset    = 0;
-    _orig_pc_offset          = 0;
-
-    _consts_offset           = data_offset();
-    _stub_offset             = data_offset();
-    _oops_offset             = data_offset();
-    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
-    _scopes_pcs_offset       = _scopes_data_offset;
-    _dependencies_offset     = _scopes_pcs_offset;
-    _handler_table_offset    = _dependencies_offset;
-    _nul_chk_table_offset    = _handler_table_offset;
-    _leaf_graph_ids_offset   = _nul_chk_table_offset;
-    _nmethod_end_offset      = _leaf_graph_ids_offset;
-    _compile_id              = compile_id;
-    _comp_level              = CompLevel_none;
-    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
-    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point         = NULL;
-    _exception_cache         = NULL;
-    _pc_desc_cache.reset_to(NULL);
-
-    code_buffer->copy_values_to(this);
-    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
-      CodeCache::add_scavenge_root_nmethod(this);
-    }
-    debug_only(verify_scavenge_root_oops());
-    CodeCache::commit(this);
-  }
-
-  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
-    ttyLocker ttyl;  // keep the following output all in one block
-    // This output goes directly to the tty, not the compiler log.
-    // To enable tools to match it up with the compilation activity,
-    // be sure to tag this tty output with the compile ID.
-    if (xtty != NULL) {
-      xtty->begin_head("print_native_nmethod");
-      xtty->method(_method);
-      xtty->stamp();
-      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
-    }
-    // print the header part first
-    print();
-    // then print the requested information
-    if (PrintNativeNMethods) {
-      print_code();
-      if (oop_maps != NULL) {
-        oop_maps->print();
-      }
-    }
-    if (PrintRelocations) {
-      print_relocations();
-    }
-    if (xtty != NULL) {
-      xtty->tail("print_native_nmethod");
-    }
-  }
-}
-
-// For dtrace wrappers
-#ifdef HAVE_DTRACE_H
-nmethod::nmethod(
-  Method* method,
-  int nmethod_size,
-  CodeOffsets* offsets,
-  CodeBuffer* code_buffer,
-  int frame_size)
-  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
-  _native_receiver_sp_offset(in_ByteSize(-1)),
-  _native_basic_lock_sp_offset(in_ByteSize(-1))
-{
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-    assert_locked_or_safepoint(CodeCache_lock);
-
-    init_defaults();
-    _method                  = method;
-    _entry_bci               = InvocationEntryBci;
-    // We have no exception handler or deopt handler make the
-    // values something that will never match a pc like the nmethod vtable entry
-    _exception_offset        = 0;
-    _deoptimize_offset       = 0;
-    _deoptimize_mh_offset    = 0;
-    _unwind_handler_offset   = -1;
-    _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
-    _orig_pc_offset          = 0;
-    _consts_offset           = data_offset();
-    _stub_offset             = data_offset();
-    _oops_offset             = data_offset();
-    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
-    _scopes_pcs_offset       = _scopes_data_offset;
-    _dependencies_offset     = _scopes_pcs_offset;
-    _handler_table_offset    = _dependencies_offset;
-    _nul_chk_table_offset    = _handler_table_offset;
-    _nmethod_end_offset      = _nul_chk_table_offset;
-    _compile_id              = 0;  // default
-    _comp_level              = CompLevel_none;
-    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
-    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point         = NULL;
-    _exception_cache         = NULL;
-    _pc_desc_cache.reset_to(NULL);
-
-    code_buffer->copy_values_to(this);
-    debug_only(verify_scavenge_root_oops());
-    CodeCache::commit(this);
-  }
-
-  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
-    ttyLocker ttyl;  // keep the following output all in one block
-    // This output goes directly to the tty, not the compiler log.
-    // To enable tools to match it up with the compilation activity,
-    // be sure to tag this tty output with the compile ID.
-    if (xtty != NULL) {
-      xtty->begin_head("print_dtrace_nmethod");
-      xtty->method(_method);
-      xtty->stamp();
-      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
-    }
-    // print the header part first
-    print();
-    // then print the requested information
-    if (PrintNMethods) {
-      print_code();
-    }
-    if (PrintRelocations) {
-      print_relocations();
-    }
-    if (xtty != NULL) {
-      xtty->tail("print_dtrace_nmethod");
-    }
-  }
-}
-#endif // def HAVE_DTRACE_H
-
-void* nmethod::operator new(size_t size, int nmethod_size) {
+    }
+    if (nm != NULL) {
+      // To make dependency checking during class loading fast, record
+      // the nmethod dependencies in the classes it is dependent on.
+      // This allows the dependency checking code to simply walk the
+      // class hierarchy above the loaded class, checking only nmethods
+      // which are dependent on those classes.  The slow way is to
+      // check every nmethod for dependencies which makes it linear in
+      // the number of methods compiled.  For applications with a lot of
+      // classes, the slow way is too slow.
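+      // For example, an nmethod with a dependency on class C is registered with
+      // C's InstanceKlass below, so a later class load only needs to check the
+      // nmethods attached to the affected hierarchy.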
+      for (Dependencies::DepStream deps(nm); deps.next(); ) {
+        Klass* klass = deps.context_type();
+        if (klass == NULL)  continue;  // ignore things like evol_method
+
+        // record this nmethod as dependent on this klass
+        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
+      }
+    }
+    if (nm != NULL)  nmethod_stats.note_nmethod(nm);
+    if (PrintAssembly && nm != NULL)
+      Disassembler::decode(nm);
+  }
+
+  // verify nmethod
+  debug_only(if (nm) nm->verify();) // might block
+
+  if (nm != NULL) {
+    nm->log_new_nmethod();
+  }
+
+  // done
+  return nm;
+}
+
+
+// For native wrappers
+nmethod::nmethod(
+  Method* method,
+  int nmethod_size,
+  int compile_id,
+  CodeOffsets* offsets,
+  CodeBuffer* code_buffer,
+  int frame_size,
+  ByteSize basic_lock_owner_sp_offset,
+  ByteSize basic_lock_sp_offset,
+  OopMapSet* oop_maps )
+  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
+             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
+  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
+  _native_basic_lock_sp_offset(basic_lock_sp_offset)
+{
+  {
+    debug_only(No_Safepoint_Verifier nsv;)
+    assert_locked_or_safepoint(CodeCache_lock);
+
+    init_defaults();
+    _method                  = method;
+    _entry_bci               = InvocationEntryBci;
+    // We have no exception handler or deopt handler; make the
+    // values something that will never match a pc, like the nmethod vtable entry
+    _exception_offset        = 0;
+    _deoptimize_offset       = 0;
+    _deoptimize_mh_offset    = 0;
+    _orig_pc_offset          = 0;
+
+    _consts_offset           = data_offset();
+    _stub_offset             = data_offset();
+    _oops_offset             = data_offset();
+    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
+    _scopes_pcs_offset       = _scopes_data_offset;
+    _dependencies_offset     = _scopes_pcs_offset;
+    _handler_table_offset    = _dependencies_offset;
+    _nul_chk_table_offset    = _handler_table_offset;
+    _leaf_graph_ids_offset   = _nul_chk_table_offset;
+    _nmethod_end_offset      = _leaf_graph_ids_offset;
+    _compile_id              = compile_id;
+    _comp_level              = CompLevel_none;
+    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
+    _osr_entry_point         = NULL;
+    _exception_cache         = NULL;
+    _pc_desc_cache.reset_to(NULL);
+
+    code_buffer->copy_values_to(this);
+    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
+      CodeCache::add_scavenge_root_nmethod(this);
+    }
+    debug_only(verify_scavenge_root_oops());
+    CodeCache::commit(this);
+  }
+
+  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
+    ttyLocker ttyl;  // keep the following output all in one block
+    // This output goes directly to the tty, not the compiler log.
+    // To enable tools to match it up with the compilation activity,
+    // be sure to tag this tty output with the compile ID.
+    if (xtty != NULL) {
+      xtty->begin_head("print_native_nmethod");
+      xtty->method(_method);
+      xtty->stamp();
+      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+    }
+    // print the header part first
+    print();
+    // then print the requested information
+    if (PrintNativeNMethods) {
+      print_code();
+      if (oop_maps != NULL) {
+        oop_maps->print();
+      }
+    }
+    if (PrintRelocations) {
+      print_relocations();
+    }
+    if (xtty != NULL) {
+      xtty->tail("print_native_nmethod");
+    }
+  }
+}
+
+// For dtrace wrappers
+#ifdef HAVE_DTRACE_H
+nmethod::nmethod(
+  Method* method,
+  int nmethod_size,
+  CodeOffsets* offsets,
+  CodeBuffer* code_buffer,
+  int frame_size)
+  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
+             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
+  _native_receiver_sp_offset(in_ByteSize(-1)),
+  _native_basic_lock_sp_offset(in_ByteSize(-1))
+{
+  {
+    debug_only(No_Safepoint_Verifier nsv;)
+    assert_locked_or_safepoint(CodeCache_lock);
+
+    init_defaults();
+    _method                  = method;
+    _entry_bci               = InvocationEntryBci;
+    // We have no exception handler or deopt handler; make the
+    // values something that will never match a pc, like the nmethod vtable entry
+    _exception_offset        = 0;
+    _deoptimize_offset       = 0;
+    _deoptimize_mh_offset    = 0;
+    _unwind_handler_offset   = -1;
+    _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
+    _orig_pc_offset          = 0;
+    _consts_offset           = data_offset();
+    _stub_offset             = data_offset();
+    _oops_offset             = data_offset();
+    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
+    _scopes_pcs_offset       = _scopes_data_offset;
+    _dependencies_offset     = _scopes_pcs_offset;
+    _handler_table_offset    = _dependencies_offset;
+    _nul_chk_table_offset    = _handler_table_offset;
+    _nmethod_end_offset      = _nul_chk_table_offset;
+    _compile_id              = 0;  // default
+    _comp_level              = CompLevel_none;
+    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
+    _osr_entry_point         = NULL;
+    _exception_cache         = NULL;
+    _pc_desc_cache.reset_to(NULL);
+
+    code_buffer->copy_values_to(this);
+    debug_only(verify_scavenge_root_oops());
+    CodeCache::commit(this);
+  }
+
+  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
+    ttyLocker ttyl;  // keep the following output all in one block
+    // This output goes directly to the tty, not the compiler log.
+    // To enable tools to match it up with the compilation activity,
+    // be sure to tag this tty output with the compile ID.
+    if (xtty != NULL) {
+      xtty->begin_head("print_dtrace_nmethod");
+      xtty->method(_method);
+      xtty->stamp();
+      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+    }
+    // print the header part first
+    print();
+    // then print the requested information
+    if (PrintNMethods) {
+      print_code();
+    }
+    if (PrintRelocations) {
+      print_relocations();
+    }
+    if (xtty != NULL) {
+      xtty->tail("print_dtrace_nmethod");
+    }
+  }
+}
+#endif // def HAVE_DTRACE_H
+
+void* nmethod::operator new(size_t size, int nmethod_size) {
   void*  alloc = CodeCache::allocate(nmethod_size);
   guarantee(alloc != NULL, "CodeCache should have enough space");
   return alloc;
-}
-
-
-nmethod::nmethod(
-  Method* method,
-  int nmethod_size,
-  int compile_id,
-  int entry_bci,
-  CodeOffsets* offsets,
-  int orig_pc_offset,
-  DebugInformationRecorder* debug_info,
-  Dependencies* dependencies,
-  CodeBuffer *code_buffer,
-  int frame_size,
-  OopMapSet* oop_maps,
-  ExceptionHandlerTable* handler_table,
-  ImplicitExceptionTable* nul_chk_table,
-  AbstractCompiler* compiler,
-  int comp_level,
-  GrowableArray<jlong>* leaf_graph_ids
-#ifdef GRAAL
-  , Handle installed_code,
-  Handle triggered_deoptimizations
-#endif
-  )
-  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
-  _native_receiver_sp_offset(in_ByteSize(-1)),
-  _native_basic_lock_sp_offset(in_ByteSize(-1))
-{
-  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-    assert_locked_or_safepoint(CodeCache_lock);
-
-    init_defaults();
-    _method                  = method;
-    _entry_bci               = entry_bci;
-    _compile_id              = compile_id;
-    _comp_level              = comp_level;
-    _compiler                = compiler;
-    _orig_pc_offset          = orig_pc_offset;
-
-    // Section offsets
-    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
-    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
-
-#ifdef GRAAL
-    _graal_installed_code = installed_code();
-    _triggered_deoptimizations = (typeArrayOop)triggered_deoptimizations();
-#endif
-    if (compiler->is_graal()) {
-      // Graal might not produce any stub sections
-      if (offsets->value(CodeOffsets::Exceptions) != -1) {
-        _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
-      } else {
-        _exception_offset = -1;
-      }
-      if (offsets->value(CodeOffsets::Deopt) != -1) {
-        _deoptimize_offset       = code_offset()          + offsets->value(CodeOffsets::Deopt);
-      } else {
-        _deoptimize_offset = -1;
-      }
-      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
-        _deoptimize_mh_offset  = code_offset()          + offsets->value(CodeOffsets::DeoptMH);
-      } else {
-        _deoptimize_mh_offset  = -1;
-      }
-    } else {
-      // Exception handler and deopt handler are in the stub section
-      assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
-      assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
-
-      _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
-      _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
-      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
-        _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
-      } else {
-        _deoptimize_mh_offset  = -1;
-      }
-    }
-    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
-      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
-    } else {
-      _unwind_handler_offset = -1;
-    }
-
-    int leaf_graph_ids_size = leaf_graph_ids == NULL ? 0 : round_to(sizeof(jlong) * leaf_graph_ids->length(), oopSize);
-
-    _oops_offset             = data_offset();
-    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);
-
-    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
-    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
-    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
-    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
-    _leaf_graph_ids_offset   = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
-    _nmethod_end_offset      = _leaf_graph_ids_offset + leaf_graph_ids_size;
-
-    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
-    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
-    _exception_cache         = NULL;
-    _pc_desc_cache.reset_to(scopes_pcs_begin());
-
-    // Copy contents of ScopeDescRecorder to nmethod
-    code_buffer->copy_values_to(this);
-    debug_info->copy_to(this);
-    dependencies->copy_to(this);
-    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
-      CodeCache::add_scavenge_root_nmethod(this);
-    }
-    debug_only(verify_scavenge_root_oops());
-
-    CodeCache::commit(this);
-
-    // Copy contents of ExceptionHandlerTable to nmethod
-    handler_table->copy_to(this);
-    nul_chk_table->copy_to(this);
-
-    if (leaf_graph_ids != NULL && leaf_graph_ids_size > 0) {
-      memcpy(leaf_graph_ids_begin(), leaf_graph_ids->adr_at(0), leaf_graph_ids_size);
-    }
-
-    // we use the entry point information to find out whether a method is
-    // static or non-static
-    assert(compiler->is_c2() ||
-           _method->is_static() == (entry_point() == _verified_entry_point),
-           "entry points must be the same for static methods and vice versa");
-  }
-
-  bool printnmethods = PrintNMethods
-    || CompilerOracle::should_print(_method)
-    || CompilerOracle::has_option_string(_method, "PrintNMethods");
-  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
-    print_nmethod(printnmethods);
-  }
-}
-
-
-// Print a short set of xml attributes to identify this nmethod.  The
-// output should be embedded in some other element.
-void nmethod::log_identity(xmlStream* log) const {
-  log->print(" compile_id='%d'", compile_id());
-  const char* nm_kind = compile_kind();
-  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
-  if (compiler() != NULL) {
-    log->print(" compiler='%s'", compiler()->name());
-  }
-  if (TieredCompilation) {
-    log->print(" level='%d'", comp_level());
-  }
-}
-
-
-#define LOG_OFFSET(log, name)                    \
-  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
-    log->print(" " XSTR(name) "_offset='%d'"    , \
-               (intptr_t)name##_begin() - (intptr_t)this)
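-
-// For example, LOG_OFFSET(xtty, stub) expands to a check that the stub
-// section is non-empty and, if so, prints " stub_offset='<n>'", where <n>
-// is stub_begin() relative to the start of this nmethod.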
-
-
-void nmethod::log_new_nmethod() const {
-  if (LogCompilation && xtty != NULL) {
-    ttyLocker ttyl;
-    HandleMark hm;
-    xtty->begin_elem("nmethod");
-    log_identity(xtty);
-    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
-    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);
-
-    LOG_OFFSET(xtty, relocation);
-    LOG_OFFSET(xtty, consts);
-    LOG_OFFSET(xtty, insts);
-    LOG_OFFSET(xtty, stub);
-    LOG_OFFSET(xtty, scopes_data);
-    LOG_OFFSET(xtty, scopes_pcs);
-    LOG_OFFSET(xtty, dependencies);
-    LOG_OFFSET(xtty, handler_table);
-    LOG_OFFSET(xtty, nul_chk_table);
-    LOG_OFFSET(xtty, oops);
-
-    xtty->method(method());
-    xtty->stamp();
-    xtty->end_elem();
-  }
-}
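-
-// The resulting log element looks roughly like the following (attribute
-// values are illustrative, and LOG_OFFSET omits empty sections):
-//   <nmethod compile_id='2' compiler='C2' entry='0x...' size='984'
-//            insts_offset='136' stub_offset='792' method='...' stamp='1.234'/>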
-
-#undef LOG_OFFSET
-
-
-// Print out more verbose output, usually for a newly created nmethod.
-void nmethod::print_on(outputStream* st, const char* msg) const {
-  if (st != NULL) {
-    ttyLocker ttyl;
-    if (WizardMode) {
-      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
-      st->print_cr(" (" INTPTR_FORMAT ")", this);
-    } else {
-      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
-    }
-  }
-}
-
-
-void nmethod::print_nmethod(bool printmethod) {
-  ttyLocker ttyl;  // keep the following output all in one block
-  if (xtty != NULL) {
-    xtty->begin_head("print_nmethod");
-    xtty->stamp();
-    xtty->end_head();
-  }
-  // print the header part first
-  print();
-  // then print the requested information
-  if (printmethod) {
-    print_code();
-    print_pcs();
-    if (oop_maps()) {
-      oop_maps()->print();
-    }
-  }
-  if (PrintDebugInfo) {
-    print_scopes();
-  }
-  if (PrintRelocations) {
-    print_relocations();
-  }
-  if (PrintDependencies) {
-    print_dependencies();
-  }
-  if (PrintExceptionHandlers) {
-    print_handler_table();
-    print_nul_chk_table();
-  }
-  if (xtty != NULL) {
-    xtty->tail("print_nmethod");
-  }
-}
-
-
-// Promote one word from an assembly-time handle to a live embedded oop.
-inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
-  if (handle == NULL ||
-      // As a special case, IC oops are initialized to 1 or -1.
-      handle == (jobject) Universe::non_oop_word()) {
-    (*dest) = (oop) handle;
-  } else {
-    (*dest) = JNIHandles::resolve_non_null(handle);
-  }
-}
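-
-// (Universe::non_oop_word() is the sentinel that inline-cache code keeps in
-// oop slots; it is copied through verbatim above so that it is never
-// resolved as, or mistaken for, a real oop.)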
-
-
-// Have to have the same name because it's called by a template
-void nmethod::copy_values(GrowableArray<jobject>* array) {
-  int length = array->length();
-  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
-  oop* dest = oops_begin();
-  for (int index = 0 ; index < length; index++) {
-    initialize_immediate_oop(&dest[index], array->at(index));
-  }
-
-  // Now we can fix up all the oops in the code.  We need to do this
-  // in the code because the assembler uses jobjects as placeholders.
-  // The code and relocations have already been initialized by the
-  // CodeBlob constructor, so it is valid even at this early point to
-  // iterate over relocations and patch the code.
-  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
-}
-
-void nmethod::copy_values(GrowableArray<Metadata*>* array) {
-  int length = array->length();
-  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
-  Metadata** dest = metadata_begin();
-  for (int index = 0 ; index < length; index++) {
-    dest[index] = array->at(index);
-  }
-}
-
-bool nmethod::is_at_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::poll_return_type)
-      return true;
-  }
-  return false;
-}
-
-
-bool nmethod::is_at_poll_or_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    relocInfo::relocType t = iter.type();
-    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
-      return true;
-  }
-  return false;
-}
-
-
-void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
-  // re-patch all oop-bearing instructions, just in case some oops moved
-  RelocIterator iter(this, begin, end);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* reloc = iter.oop_reloc();
-      if (initialize_immediates && reloc->oop_is_immediate()) {
-        oop* dest = reloc->oop_addr();
-        initialize_immediate_oop(dest, (jobject) *dest);
-      }
-      // Refresh the oop-related bits of this instruction.
-      reloc->fix_oop_relocation();
-    } else if (iter.type() == relocInfo::metadata_type) {
-      metadata_Relocation* reloc = iter.metadata_reloc();
-      reloc->fix_metadata_relocation();
-    }
-
-    // There must not be any interfering patches or breakpoints.
-    assert(!(iter.type() == relocInfo::breakpoint_type
-             && iter.breakpoint_reloc()->active()),
-           "no active breakpoint");
-  }
-}
-
-
-void nmethod::verify_oop_relocations() {
-  // Ensure that the code matches the current oop values
-  RelocIterator iter(this, NULL, NULL);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* reloc = iter.oop_reloc();
-      if (!reloc->oop_is_immediate()) {
-        reloc->verify_oop_relocation();
-      }
-    }
-  }
-}
-
-
-ScopeDesc* nmethod::scope_desc_at(address pc) {
-  PcDesc* pd = pc_desc_at(pc);
-  guarantee(pd != NULL, "scope must be present");
-  return new ScopeDesc(this, pd->scope_decode_offset(),
-                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
-                       pd->return_oop());
-}
-
-
-void nmethod::clear_inline_caches() {
-  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
-  if (is_zombie()) {
-    return;
-  }
-
-  RelocIterator iter(this);
-  while (iter.next()) {
-    iter.reloc()->clear_inline_cache();
-  }
-}
-
-
-void nmethod::cleanup_inline_caches() {
-
-  assert_locked_or_safepoint(CompiledIC_lock);
-
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (!is_in_use()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // This means that the low_boundary is going to be a little too high.
-    // This shouldn't matter, since oops of non-entrant methods are never used.
-    // In fact, why are we bothering to look at oops in a non-entrant method??
-  }
-
-  // Find all calls in an nmethod and clear the ones that point to zombie methods
-  ResourceMark rm;
-  RelocIterator iter(this, low_boundary);
-  while(iter.next()) {
-    switch(iter.type()) {
-      case relocInfo::virtual_call_type:
-      case relocInfo::opt_virtual_call_type: {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
-        // Ok to look up references to zombies here
-        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
-        }
-        break;
-      }
-      case relocInfo::static_call_type: {
-        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
-        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
-        }
-        break;
-      }
-    }
-  }
-}
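-
-// In both cases above, a call site is reset to the clean state whenever its
-// target nmethod is dead or superseded (method()->code() != nm); the next
-// invocation through that site then re-runs normal call resolution.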
-
-// This is a private interface with the sweeper.
-void nmethod::mark_as_seen_on_stack() {
-  assert(is_not_entrant(), "must be a non-entrant method");
-  // Set the traversal mark to ensure that the sweeper does 2
-  // cleaning passes before moving to zombie.
-  set_stack_traversal_mark(NMethodSweeper::traversal_count());
-}
-
-// Tell if a non-entrant method can be converted to a zombie (i.e.,
-// there are no activations on the stack and it is not in use by the
-// VM or the ServiceThread).
-bool nmethod::can_not_entrant_be_converted() {
-  assert(is_not_entrant(), "must be a non-entrant method");
-
-  // Since the nmethod sweeper only does a partial sweep, the sweeper's traversal
-  // count can be greater than the stack traversal count before it hits the
-  // nmethod for the second time.
-  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
-         !is_locked_by_vm();
-}
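-
-// Worked example: an nmethod with stack_traversal_mark() == 5 only becomes
-// convertible once NMethodSweeper::traversal_count() reaches 7 (5 + 1 < 7),
-// i.e. after two further sweeps have failed to see it on any stack, and
-// only while it is not locked by the VM.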
-
-void nmethod::inc_decompile_count() {
-  if (!is_compiled_by_c2() && !is_compiled_by_graal()) return;
-  // Could be gated by ProfileTraps, but do not bother...
-  Method* m = method();
-  if (m == NULL)  return;
-  MethodData* mdo = m->method_data();
-  if (mdo == NULL)  return;
-  // There is a benign race here.  See comments in methodData.hpp.
-  mdo->inc_decompile_count();
-}
-
-void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
-
-  post_compiled_method_unload();
-
-  // Since this nmethod is being unloaded, make sure that dependencies
-  // recorded in instanceKlasses get flushed, and pass a non-NULL closure to
-  // indicate that this work is being done during a GC.
-  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
-  assert(is_alive != NULL, "Should be non-NULL");
-  // A non-NULL is_alive closure indicates that this is being called during GC.
-  flush_dependencies(is_alive);
-
-  // Break cycle between nmethod & method
-  if (TraceClassUnloading && WizardMode) {
-    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
-                  " unloadable], Method*(" INTPTR_FORMAT
-                  "), cause(" INTPTR_FORMAT ")",
-                  this, (address)_method, (address)cause);
-    if (!Universe::heap()->is_gc_active())
-      cause->klass()->print();
-  }
-  // Unlink the osr method, so we do not look this up again
-  if (is_osr_method()) {
-    invalidate_osr_method();
-  }
-  // If _method is already NULL the Method* is about to be unloaded,
-  // so we don't have to break the cycle. Note that it is possible to
-  // have the Method* live here, in case we unload the nmethod because
-  // it is pointing to some oop (other than the Method*) being unloaded.
-  if (_method != NULL) {
-    // OSR methods point to the Method*, but the Method* does not
-    // point back!
-    if (_method->code() == this) {
-      _method->clear_code(); // Break a cycle
-    }
-    _method = NULL;            // Clear the method of this dead nmethod
-  }
-
-#ifdef GRAAL
-  // The method can only be unloaded after the pointer to the installed code
-  // Java wrapper is no longer alive. Here we need to clear out this weak
-  // reference to the dead object.
-  if (_graal_installed_code != NULL) {
-    _graal_installed_code = NULL;
-  }
-#endif
-
-  // Make the nmethod unloaded - i.e., change state and notify sweeper
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  if (is_in_use()) {
-    // Transitioning directly from live to unloaded -- so
-    // we need to force a cache clean-up; remember this
-    // for later on.
-    CodeCache::set_needs_cache_clean(true);
-  }
-  _state = unloaded;
-
-  // Log the unloading.
-  log_state_change();
-
-  // The Method* is gone at this point
-  assert(_method == NULL, "Tautology");
-
-  set_osr_link(NULL);
-  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
-  NMethodSweeper::notify(this);
-}
-
-void nmethod::invalidate_osr_method() {
-  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
-  // Remove from list of active nmethods
-  if (method() != NULL)
-    method()->method_holder()->remove_osr_nmethod(this);
-  // Set entry as invalid
-  _entry_bci = InvalidOSREntryBci;
-}
-
-void nmethod::log_state_change() const {
-  if (LogCompilation) {
-    if (xtty != NULL) {
-      ttyLocker ttyl;  // keep the following output all in one block
-      if (_state == unloaded) {
-        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
-                         os::current_thread_id());
-      } else {
-        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
-                         os::current_thread_id(),
-                         (_state == zombie ? " zombie='1'" : ""));
-      }
-      log_identity(xtty);
-      xtty->stamp();
-      xtty->end_elem();
-    }
-  }
-  if (PrintCompilation && _state != unloaded) {
-    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
-  }
-}
-
-// Common functionality for both make_not_entrant and make_zombie
-bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
-  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
-  assert(!is_zombie(), "should not already be a zombie");
-
-  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
-  nmethodLocker nml(this);
-  methodHandle the_method(method());
-  No_Safepoint_Verifier nsv;
-
-  {
-    // invalidate osr nmethod before acquiring the patching lock since
-    // they both acquire leaf locks and we don't want a deadlock.
-    // This logic is equivalent to the logic below for patching the
-    // verified entry point of regular methods.
-    if (is_osr_method()) {
-      // this effectively makes the osr nmethod not entrant
-      invalidate_osr_method();
-    }
-
-    // Enter critical section.  Does not block for safepoint.
-    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
-
-    if (_state == state) {
-      // another thread already performed this transition so nothing
-      // to do, but return false to indicate this.
-      return false;
-    }
-
-    // The caller can be calling the method statically or through an inline
-    // cache call.
-    if (!is_osr_method() && !is_not_entrant()) {
-      address stub = SharedRuntime::get_handle_wrong_method_stub();
-#ifdef GRAAL
-      if (_graal_installed_code != NULL && !HotSpotNmethod::isDefault(_graal_installed_code)) {
-        // This was manually installed machine code. Patch entry with stub that throws an exception.
-        stub = SharedRuntime::get_deoptimized_installed_code_stub();
-      }
-#endif
-      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), stub);
-    }
-
-    if (is_in_use()) {
-      // It's a true state change, so mark the method as decompiled.
-      // Do it only for transition from alive.
-      inc_decompile_count();
-    }
-
-    // Change state
-    _state = state;
-
-    // Log the transition once
-    log_state_change();
-
-    // Remove nmethod from method.
-    // We need to check if both the _code and _from_compiled_code_entry_point
-    // refer to this nmethod because there is a race in setting these two fields
-    // in Method* as seen in bugid 4947125.
-    // If the vep() points to the zombie nmethod, the memory for the nmethod
-    // could be flushed and the compiler and vtable stubs could still call
-    // through it.
-    if (method() != NULL && (method()->code() == this ||
-                             method()->from_compiled_entry() == verified_entry_point())) {
-      HandleMark hm;
-      method()->clear_code();
-    }
-
-    if (state == not_entrant) {
-      mark_as_seen_on_stack();
-    }
-
-  } // leave critical region under Patching_lock
-
-  // When the nmethod becomes zombie it is no longer alive so the
-  // dependencies must be flushed.  nmethods in the not_entrant
-  // state will be flushed later when the transition to zombie
-  // happens or they get unloaded.
-  if (state == zombie) {
-    {
-      // Flushing dependencies must be done before any possible
-      // safepoint can sneak in, otherwise the oops used by the
-      // dependency logic could have become stale.
-      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      flush_dependencies(NULL);
-    }
-
-    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
-    // event and it hasn't already been reported for this nmethod then
-    // report it now. (The event may have been reported earlier if the GC
-    // marked it for unloading.) JvmtiDeferredEventQueue support means
-    // we no longer go to a safepoint here.
-    post_compiled_method_unload();
-
-#ifdef ASSERT
-    // It's no longer safe to access the oops section since zombie
-    // nmethods aren't scanned for GC.
-    _oops_are_stale = true;
-#endif
-  } else {
-    assert(state == not_entrant, "other cases may need to be handled differently");
-  }
-
-  if (TraceCreateZombies) {
-    ResourceMark m;
-    tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", this, this->method()->name_and_sig_as_C_string(), (state == not_entrant) ? "not entrant" : "zombie");
-  }
-
-  // Make sweeper aware that there is a zombie method that needs to be removed
-  NMethodSweeper::notify(this);
-
-  return true;
-}
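-
-// For reference, the overall life cycle is roughly:
-//   in_use -> not_entrant -> zombie -> (marked for reclamation) -> flushed,
-// with make_unloaded() short-circuiting to 'unloaded' when oops embedded in
-// an nmethod die during GC.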
-
-void nmethod::flush() {
-  // Note that there are no valid oops in the nmethod anymore.
-  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
-  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
-
-  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  assert_locked_or_safepoint(CodeCache_lock);
-
-  // completely deallocate this method
-  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
-  if (PrintMethodFlushing) {
-    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
-        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
-  }
-
-  // We need to deallocate any ExceptionCache data.
-  // Note that we do not need to grab the nmethod lock for this, it
-  // better be thread safe if we're disposing of it!
-  ExceptionCache* ec = exception_cache();
-  set_exception_cache(NULL);
-  while(ec != NULL) {
-    ExceptionCache* next = ec->next();
-    delete ec;
-    ec = next;
-  }
-
-  if (on_scavenge_root_list()) {
-    CodeCache::drop_scavenge_root_nmethod(this);
-  }
-
-  if (is_speculatively_disconnected()) {
-    CodeCache::remove_saved_code(this);
-  }
-
-#ifdef SHARK
-  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
-#endif // SHARK
-
-  ((CodeBlob*)(this))->flush();
-
-  CodeCache::free(this);
-}
-
-
-//
-// Notify all classes this nmethod is dependent on that it is no
-// longer dependent. This should only be called in two situations.
-// First, when an nmethod transitions to a zombie all dependents need
-// to be cleared.  Since zombification happens at a safepoint there are no
-// synchronization issues.  The second place is a little more tricky.
-// During phase 1 of mark sweep class unloading may happen and as a
-// result some nmethods may get unloaded.  In this case the flushing
-// of dependencies must happen during phase 1 since after GC any
-// dependencies in the unloaded nmethod won't be updated, so
-// traversing the dependency information is unsafe.  In that case this
-// function is called with a non-NULL argument and this function only
-// notifies instanceKlasses that are reachable.
-
-void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
-  "is_alive is non-NULL if and only if we are called during GC");
-  if (!has_flushed_dependencies()) {
-    set_has_flushed_dependencies();
-    for (Dependencies::DepStream deps(this); deps.next(); ) {
-      Klass* klass = deps.context_type();
-      if (klass == NULL)  continue;  // ignore things like evol_method
-
-      // During GC the is_alive closure is non-NULL, and is used to
-      // determine liveness of dependees that need to be updated.
-      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
-        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
-      }
-    }
-  }
-}
-
-
-// If this oop is not live, the nmethod can be unloaded.
-bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
-  assert(root != NULL, "just checking");
-  oop obj = *root;
-  if (obj == NULL || is_alive->do_object_b(obj)) {
-    return false;
-  }
-
-  // If ScavengeRootsInCode is true, an nmethod might be unloaded
-  // simply because one of its constant oops has gone dead.
-  // No actual classes need to be unloaded in order for this to occur.
-  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
-  make_unloaded(is_alive, obj);
-  return true;
-}
-
-// ------------------------------------------------------------------
-// post_compiled_method_load_event
-// new method for install_code() path
-// Transfer information from compilation to jvmti
-void nmethod::post_compiled_method_load_event() {
-
-  Method* moop = method();
-#ifndef USDT2
-  HS_DTRACE_PROBE8(hotspot, compiled__method__load,
-      moop->klass_name()->bytes(),
-      moop->klass_name()->utf8_length(),
-      moop->name()->bytes(),
-      moop->name()->utf8_length(),
-      moop->signature()->bytes(),
-      moop->signature()->utf8_length(),
-      insts_begin(), insts_size());
-#else /* USDT2 */
-  HOTSPOT_COMPILED_METHOD_LOAD(
-      (char *) moop->klass_name()->bytes(),
-      moop->klass_name()->utf8_length(),
-      (char *) moop->name()->bytes(),
-      moop->name()->utf8_length(),
-      (char *) moop->signature()->bytes(),
-      moop->signature()->utf8_length(),
-      insts_begin(), insts_size());
-#endif /* USDT2 */
-
-  if (JvmtiExport::should_post_compiled_method_load() ||
-      JvmtiExport::should_post_compiled_method_unload()) {
-    get_and_cache_jmethod_id();
-  }
-
-  if (JvmtiExport::should_post_compiled_method_load()) {
-    // Let the Service thread (which is a real Java thread) post the event
-    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
-    JvmtiDeferredEventQueue::enqueue(
-      JvmtiDeferredEvent::compiled_method_load_event(this));
-  }
-}
-
-jmethodID nmethod::get_and_cache_jmethod_id() {
-  if (_jmethod_id == NULL) {
-    // Cache the jmethod_id since it can no longer be looked up once the
-    // method itself has been marked for unloading.
-    _jmethod_id = method()->jmethod_id();
-  }
-  return _jmethod_id;
-}
-
-void nmethod::post_compiled_method_unload() {
-  if (unload_reported()) {
-    // During unloading we transition to unloaded and then to zombie
-    // and the unloading is reported during the first transition.
-    return;
-  }
-
-  assert(_method != NULL && !is_unloaded(), "just checking");
-  DTRACE_METHOD_UNLOAD_PROBE(method());
-
-  // If a JVMTI agent has enabled the CompiledMethodUnload event then
-  // post the event. Sometime later this nmethod will be made a zombie
-  // by the sweeper but the Method* will not be valid at that point.
-  // If the _jmethod_id is null then no load event was ever requested
-  // so don't bother posting the unload.  The main reason for this is
-  // that the jmethodID is a weak reference to the Method* so if
-  // it's being unloaded there's no way to look it up since the weak
-  // ref will have been cleared.
-  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
-    assert(!unload_reported(), "already unloaded");
-    JvmtiDeferredEvent event =
-      JvmtiDeferredEvent::compiled_method_unload_event(this,
-          _jmethod_id, insts_begin());
-    if (SafepointSynchronize::is_at_safepoint()) {
-      // Don't want to take the queueing lock. Add it as pending and
-      // it will get enqueued later.
-      JvmtiDeferredEventQueue::add_pending_event(event);
-    } else {
-      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
-      JvmtiDeferredEventQueue::enqueue(event);
-    }
-  }
-
-  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
-  // any time. As the nmethod is being unloaded now, we mark it as
-  // having the unload event reported - this will ensure that we don't
-  // attempt to report the event in the unlikely scenario where the
-  // event is enabled at the time the nmethod is made a zombie.
-  set_unload_reported();
-}
-
-// This is called at the end of the strong tracing/marking phase of a
-// GC to unload an nmethod if it contains otherwise unreachable
-// oops.
-
-void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
-  // Make sure the oops are ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  // The RedefineClasses() API can cause the class unloading invariant
-  // to no longer be true. See jvmtiExport.hpp for details.
-  // Also, leave a debugging breadcrumb in local flag.
-  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
-  if (a_class_was_redefined) {
-    // This set of the unloading_occurred flag is done before the
-    // call to post_compiled_method_unload() so that the unloading
-    // of this nmethod is reported.
-    unloading_occurred = true;
-  }
-
-#ifdef GRAAL
-  // Follow Graal method
-  if (_graal_installed_code != NULL) {
-    if (HotSpotNmethod::isDefault(_graal_installed_code)) {
-      if (!is_alive->do_object_b(_graal_installed_code)) {
-        _graal_installed_code = NULL;
-      }
-    } else {
-      if (can_unload(is_alive, (oop*)&_graal_installed_code, unloading_occurred)) {
-        return;
-      }
-    }
-  }
-#endif
-
-  // Exception cache
-  ExceptionCache* ec = exception_cache();
-  while (ec != NULL) {
-    Klass* ex_klass = ec->exception_type();
-    ExceptionCache* next_ec = ec->next();
-    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
-      remove_from_exception_cache(ec);
-    }
-    ec = next_ec;
-  }
-
-  // If class unloading occurred we first iterate over all inline caches and
-  // clear ICs where the cached oop is referring to an unloaded klass or method.
-  // The remaining live cached oops will be traversed in the relocInfo::oop_type
-  // iteration below.
-  if (unloading_occurred) {
-    RelocIterator iter(this, low_boundary);
-    while(iter.next()) {
-      if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
-        if (ic->is_icholder_call()) {
-          // The only exception is compiledICHolder oops which may
-          // yet be marked below. (We check this further below).
-          CompiledICHolder* cichk_oop = ic->cached_icholder();
-          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
-              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
-            continue;
-          }
-        } else {
-          Metadata* ic_oop = ic->cached_metadata();
-          if (ic_oop != NULL) {
-            if (ic_oop->is_klass()) {
-              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else if (ic_oop->is_method()) {
-              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else {
-              ShouldNotReachHere();
-            }
-          }
-        }
-        ic->set_to_clean();
-      }
-    }
-  }
-
-  // Compiled code
-  {
-    RelocIterator iter(this, low_boundary);
-    while (iter.next()) {
-      if (iter.type() == relocInfo::oop_type) {
-        oop_Relocation* r = iter.oop_reloc();
-        // In this loop, we must only traverse those oops directly embedded in
-        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
-        assert(1 == (r->oop_is_immediate()) +
-                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
-               "oop must be found in exactly one place");
-        if (r->oop_is_immediate() && r->oop_value() != NULL) {
-          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
-            return;
-          }
-        }
-      }
-    }
-  }
-
-
-  // Scopes
-  for (oop* p = oops_begin(); p < oops_end(); p++) {
-    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
-    if (can_unload(is_alive, p, unloading_occurred)) {
-      return;
-    }
-  }
-
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary, is_alive);
-}
-
-#ifdef ASSERT
-
-class CheckClass : AllStatic {
-  static BoolObjectClosure* _is_alive;
-
-  // Check class_loader is alive for this bit of metadata.
-  static void check_class(Metadata* md) {
-    Klass* klass = NULL;
-    if (md->is_klass()) {
-      klass = ((Klass*)md);
-    } else if (md->is_method()) {
-      klass = ((Method*)md)->method_holder();
-    } else if (md->is_methodData()) {
-      klass = ((MethodData*)md)->method()->method_holder();
-    } else {
-      md->print();
-      ShouldNotReachHere();
-    }
-    assert(klass->is_loader_alive(_is_alive), "must be alive");
-  }
- public:
-  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
-    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
-    _is_alive = is_alive;
-    nm->metadata_do(check_class);
-  }
-};
-
-// This is called during a safepoint so can use static data
-BoolObjectClosure* CheckClass::_is_alive = NULL;
-#endif // ASSERT
-
-
-// Processing of oop references should have been sufficient to keep
-// all strong references alive.  Any weak references should have been
-// cleared as well.  Visit all the metadata and ensure that it's
-// really alive.
-void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
-#ifdef ASSERT
-  RelocIterator iter(this, low_boundary);
-  while (iter.next()) {
-    // static_stub_Relocations may have dangling references to
-    // Method*s so trim them out here.  Otherwise it looks like
-    // compiled code is maintaining a link to dead metadata.
-    address static_call_addr = NULL;
-    if (iter.type() == relocInfo::opt_virtual_call_type) {
-      CompiledIC* cic = CompiledIC_at(iter.reloc());
-      if (!cic->is_call_to_interpreted()) {
-        static_call_addr = iter.addr();
-      }
-    } else if (iter.type() == relocInfo::static_call_type) {
-      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
-      if (!csc->is_call_to_interpreted()) {
-        static_call_addr = iter.addr();
-      }
-    }
-    if (static_call_addr != NULL) {
-      RelocIterator sciter(this, low_boundary);
-      while (sciter.next()) {
-        if (sciter.type() == relocInfo::static_stub_type &&
-            sciter.static_stub_reloc()->static_call() == static_call_addr) {
-          sciter.static_stub_reloc()->clear_inline_cache();
-        }
-      }
-    }
-  }
-  // Check that the metadata embedded in the nmethod is alive
-  CheckClass::do_check_class(is_alive, this);
-#endif
-}
-
-
-// Iterate over metadata calling this function.   Used by RedefineClasses
-void nmethod::metadata_do(void f(Metadata*)) {
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-  {
-    // Visit all immediate references that are embedded in the instruction stream.
-    RelocIterator iter(this, low_boundary);
-    while (iter.next()) {
-      if (iter.type() == relocInfo::metadata_type ) {
-        metadata_Relocation* r = iter.metadata_reloc();
-        // In this loop, we must only follow those metadata directly embedded in
-        // the code.  Other metadata (oop_index>0) are seen as part of
-        // the metadata section below.
-        assert(1 == (r->metadata_is_immediate()) +
-               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
-               "metadata must be found in exactly one place");
-        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
-          Metadata* md = r->metadata_value();
-          f(md);
-        }
-      }
-    }
-  }
-
-  // Visit the metadata section
-  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
-    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip sentinels and NULLs
-    Metadata* md = *p;
-    f(md);
-  }
-  // Visit the Method* itself, which is not embedded in these other places.
-  if (_method != NULL) f(_method);
-}
-
-
-// This method is called twice during GC -- once while
-// tracing the "active" nmethods on thread stacks during
-// the (strong) marking phase, and then again when walking
-// the code cache contents during the weak roots processing
-// phase. The two uses are distinguished by means of the
-// 'do_strong_roots_only' flag, which is true in the first
-// case. We want to walk the weak roots in the nmethod
-// only in the second case. The weak roots in the nmethod
-// are the oops in the ExceptionCache and the InlineCache
-// oops.
-void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
-  // make sure the oops are ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-#ifdef GRAAL
-  if (_graal_installed_code != NULL) {
-    f->do_oop((oop*) &_graal_installed_code);
-  }
-  if (_triggered_deoptimizations != NULL) {
-    f->do_oop((oop*) &_triggered_deoptimizations);
-  }
-#endif
-
-  RelocIterator iter(this, low_boundary);
-
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type ) {
-      oop_Relocation* r = iter.oop_reloc();
-      // In this loop, we must only follow those oops directly embedded in
-      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
-      assert(1 == (r->oop_is_immediate()) +
-                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
-             "oop must be found in exactly one place");
-      if (r->oop_is_immediate() && r->oop_value() != NULL) {
-        f->do_oop(r->oop_addr());
-      }
-    }
-  }
-
-  // Scopes
-  // This includes oop constants not inlined in the code stream.
-  for (oop* p = oops_begin(); p < oops_end(); p++) {
-    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
-    f->do_oop(p);
-  }
-}
-
-#define NMETHOD_SENTINEL ((nmethod*)badAddress)
-
-nmethod* volatile nmethod::_oops_do_mark_nmethods;
-
-// An nmethod is "marked" if its _mark_link is set non-null.
-// Even if it is the end of the linked list, it will have a non-null link value,
-// as long as it is on the list.
-// This code must be MP safe, because it is used from parallel GC passes.
-bool nmethod::test_set_oops_do_mark() {
-  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
-  nmethod* observed_mark_link = _oops_do_mark_link;
-  if (observed_mark_link == NULL) {
-    // Claim this nmethod for this thread to mark.
-    observed_mark_link = (nmethod*)
-      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
-    if (observed_mark_link == NULL) {
-
-      // Atomically append this nmethod (now claimed) to the head of the list:
-      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
-      for (;;) {
-        nmethod* required_mark_nmethods = observed_mark_nmethods;
-        _oops_do_mark_link = required_mark_nmethods;
-        observed_mark_nmethods = (nmethod*)
-          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
-        if (observed_mark_nmethods == required_mark_nmethods)
-          break;
-      }
-      // Mark was clear when we first saw this guy.
-      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
-      return false;
-    }
-  }
-  // On fall through, another racing thread marked this nmethod before we did.
-  return true;
-}
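-
-// The sequence above is a conventional lock-free pattern: the first cmpxchg
-// claims this nmethod for exactly one GC worker thread, and the cmpxchg loop
-// then performs a lock-free stack push of the claimed nmethod onto the
-// global _oops_do_mark_nmethods list.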
-
-void nmethod::oops_do_marking_prologue() {
-  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
-  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
-  // We use cmpxchg_ptr instead of regular assignment here because the user
-  // may fork a bunch of threads, and we need them all to see the same state.
-  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
-  guarantee(observed == NULL, "no races in this sequential code");
-}
-
-void nmethod::oops_do_marking_epilogue() {
-  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
-  nmethod* cur = _oops_do_mark_nmethods;
-  while (cur != NMETHOD_SENTINEL) {
-    assert(cur != NULL, "not NULL-terminated");
-    nmethod* next = cur->_oops_do_mark_link;
-    cur->_oops_do_mark_link = NULL;
-    cur->fix_oop_relocations();
-    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
-    cur = next;
-  }
-  void* required = _oops_do_mark_nmethods;
-  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
-  guarantee(observed == required, "no races in this sequential code");
-  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
-}
-
-class DetectScavengeRoot: public OopClosure {
-  bool     _detected_scavenge_root;
-public:
-  DetectScavengeRoot() : _detected_scavenge_root(false)
-  { NOT_PRODUCT(_print_nm = NULL); }
-  bool detected_scavenge_root() { return _detected_scavenge_root; }
-  virtual void do_oop(oop* p) {
-    if ((*p) != NULL && (*p)->is_scavengable()) {
-      NOT_PRODUCT(maybe_print(p));
-      _detected_scavenge_root = true;
-    }
-  }
-  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-
-#ifndef PRODUCT
-  nmethod* _print_nm;
-  void maybe_print(oop* p) {
-    if (_print_nm == NULL)  return;
-    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
-                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
-                  (intptr_t)(*p), (intptr_t)p);
-    (*p)->print();
-  }
-#endif //PRODUCT
-};
-
-bool nmethod::detect_scavenge_root_oops() {
-  DetectScavengeRoot detect_scavenge_root;
-  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
-  oops_do(&detect_scavenge_root);
-  return detect_scavenge_root.detected_scavenge_root();
-}
-
-// Method that knows how to preserve outgoing arguments at call. This method must be
-// called with a frame corresponding to a Java invoke
-void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
-#ifndef SHARK
-  if (!method()->is_native()) {
-    SimpleScopeDesc ssd(this, fr.pc());
-    Bytecode_invoke call(ssd.method(), ssd.bci());
-    // compiled invokedynamic call sites have an implicit receiver at
-    // resolution time, so make sure it gets GC'ed.
-    bool has_receiver = !call.is_invokestatic();
-    Symbol* signature = call.signature();
-    fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
-  }
-#endif // !SHARK
-}
-
-
-oop nmethod::embeddedOop_at(u_char* p) {
-  RelocIterator iter(this, p, p + 1);
-  while (iter.next())
-    if (iter.type() == relocInfo::oop_type) {
-      return iter.oop_reloc()->oop_value();
-    }
-  return NULL;
-}
-
-
-inline bool includes(void* p, void* from, void* to) {
-  return from <= p && p < to;
-}
-
-
-void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
-  assert(count >= 2, "must be sentinel values, at least");
-
-#ifdef ASSERT
-  // must be sorted and unique; we do a binary search in find_pc_desc()
-  int prev_offset = pcs[0].pc_offset();
-  assert(prev_offset == PcDesc::lower_offset_limit,
-         "must start with a sentinel");
-  for (int i = 1; i < count; i++) {
-    int this_offset = pcs[i].pc_offset();
-    assert(this_offset > prev_offset, "offsets must be sorted");
-    prev_offset = this_offset;
-  }
-  assert(prev_offset == PcDesc::upper_offset_limit,
-         "must end with a sentinel");
-#endif //ASSERT
-
-  // Search for MethodHandle invokes and tag the nmethod.
-  for (int i = 0; i < count; i++) {
-    if (pcs[i].is_method_handle_invoke()) {
-      set_has_method_handle_invokes(true);
-      break;
-    }
-  }
-  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
-
-  int size = count * sizeof(PcDesc);
-  assert(scopes_pcs_size() >= size, "oob");
-  memcpy(scopes_pcs_begin(), pcs, size);
-
-  // Adjust the final sentinel downward.
-  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
-  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
-  last_pc->set_pc_offset(content_size() + 1);
-  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
-    // Fill any rounding gaps with copies of the last record.
-    last_pc[1] = last_pc[0];
-  }
-  // The following assert could fail if sizeof(PcDesc) is not
-  // an integral multiple of oopSize (the rounding term).
-  // If it fails, change the logic to always allocate a multiple
-  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
-  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
-}
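-
-// Note on the gap filling above: if rounding leaves the scopes_pcs area a
-// slot or two larger than 'count' records, the trailing slots are filled
-// with copies of the (already adjusted) final sentinel record, so a search
-// over the area never reads uninitialized memory.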
-
-void nmethod::copy_scopes_data(u_char* buffer, int size) {
-  assert(scopes_data_size() >= size, "oob");
-  memcpy(scopes_data_begin(), buffer, size);
-}
-
-
-#ifdef ASSERT
-static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
-  PcDesc* lower = nm->scopes_pcs_begin();
-  PcDesc* upper = nm->scopes_pcs_end();
-  lower += 1; // exclude initial sentinel
-  PcDesc* res = NULL;
-  for (PcDesc* p = lower; p < upper; p++) {
-    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
-    if (match_desc(p, pc_offset, approximate)) {
-      if (res == NULL)
-        res = p;
-      else
-        res = (PcDesc*) badAddress;
-    }
-  }
-  return res;
-}
-#endif
-
-
-// Finds a PcDesc with real-pc equal to "pc"
-PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
-  address base_address = code_begin();
-  if ((pc < base_address) ||
-      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
-    return NULL;  // PC is wildly out of range
-  }
-  int pc_offset = (int) (pc - base_address);
-
-  // Check the PcDesc cache if it contains the desired PcDesc
-  // (This has an almost 100% hit rate.)
-  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
-  if (res != NULL) {
-    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
-    return res;
-  }
-
-  // Fallback algorithm: quasi-linear search for the PcDesc
-  // Find the last pc_offset less than the given offset.
-  // The successor must be the required match, if there is a match at all.
-  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
-  PcDesc* lower = scopes_pcs_begin();
-  PcDesc* upper = scopes_pcs_end();
-  upper -= 1; // exclude final sentinel
-  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
-
-#define assert_LU_OK \
-  /* invariant on lower..upper during the following search: */ \
-  assert(lower->pc_offset() <  pc_offset, "sanity"); \
-  assert(upper->pc_offset() >= pc_offset, "sanity")
-  assert_LU_OK;
-
-  // Use the last successful return as a split point.
-  PcDesc* mid = _pc_desc_cache.last_pc_desc();
-  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
-  if (mid->pc_offset() < pc_offset) {
-    lower = mid;
-  } else {
-    upper = mid;
-  }
-
-  // Take giant steps at first (4096, then 256, then 16, then 1)
-  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
-  const int RADIX = (1 << LOG2_RADIX);
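-  // With LOG2_RADIX == 4 the loop below probes in steps of 1<<12 == 4096,
-  // then 256, then 16 slots; in debug builds LOG2_RADIX drops to 3, giving
-  // steps of 512, 64 and 8 before the final linear sneak-up.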
-  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
-    while ((mid = lower + step) < upper) {
-      assert_LU_OK;
-      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
-      if (mid->pc_offset() < pc_offset) {
-        lower = mid;
-      } else {
-        upper = mid;
-        break;
-      }
-    }
-    assert_LU_OK;
-  }
-
-  // Sneak up on the value with a linear search of length ~16.
-  while (true) {
-    assert_LU_OK;
-    mid = lower + 1;
-    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
-    if (mid->pc_offset() < pc_offset) {
-      lower = mid;
-    } else {
-      upper = mid;
-      break;
-    }
-  }
-#undef assert_LU_OK
-
-  if (match_desc(upper, pc_offset, approximate)) {
-    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
-    _pc_desc_cache.add_pc_desc(upper);
-    return upper;
-  } else {
-    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
-    return NULL;
-  }
-}
-
-
-bool nmethod::check_all_dependencies() {
-  bool found_check = false;
-  // wholesale check of all dependencies
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.check_dependency() != NULL) {
-      found_check = true;
-      NOT_DEBUG(break);
-    }
-  }
-  return found_check;  // tell caller if we found anything
-}
-
-bool nmethod::check_dependency_on(DepChange& changes) {
-  // What has happened:
-  // 1) a new class dependee has been added
-  // 2) dependee and all its super classes have been marked
-  bool found_check = false;  // set true if we are upset
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    // Evaluate only relevant dependencies.
-    if (deps.spot_check_dependency_at(changes) != NULL) {
-      found_check = true;
-      NOT_DEBUG(break);
-    }
-  }
-  return found_check;
-}
-
-bool nmethod::is_evol_dependent_on(Klass* dependee) {
-  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
-  Array<Method*>* dependee_methods = dependee_ik->methods();
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.type() == Dependencies::evol_method) {
-      Method* method = deps.method_argument(0);
-      for (int j = 0; j < dependee_methods->length(); j++) {
-        if (dependee_methods->at(j) == method) {
-          // RC_TRACE macro has an embedded ResourceMark
-          RC_TRACE(0x01000000,
-            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
-            _method->method_holder()->external_name(),
-            _method->name()->as_C_string(),
-            _method->signature()->as_C_string(), compile_id(),
-            method->method_holder()->external_name(),
-            method->name()->as_C_string(),
-            method->signature()->as_C_string()));
-          if (TraceDependencies || LogCompilation)
-            deps.log_dependency(dependee);
-          return true;
-        }
-      }
-    }
-  }
-  return false;
-}
-
-// Called from mark_for_deoptimization, when dependee is invalidated.
-bool nmethod::is_dependent_on_method(Method* dependee) {
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.type() != Dependencies::evol_method)
-      continue;
-    Method* method = deps.method_argument(0);
-    if (method == dependee) return true;
-  }
-  return false;
-}
-
-
-bool nmethod::is_patchable_at(address instr_addr) {
-  assert(insts_contains(instr_addr), "wrong nmethod used");
-  if (is_zombie()) {
-    // a zombie may never be patched
-    return false;
-  }
-  return true;
-}
-
-
-address nmethod::continuation_for_implicit_exception(address pc) {
-  // Exception happened outside inline-cache check code => we are inside
-  // an active nmethod => use the continuation pc (cpc) to determine a return address
-  int exception_offset = pc - code_begin();
-  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
-#ifdef ASSERT
-  if (cont_offset == 0) {
-    Thread* thread = ThreadLocalStorage::get_thread_slow();
-    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
-    HandleMark hm(thread);
-    ResourceMark rm(thread);
-    CodeBlob* cb = CodeCache::find_blob(pc);
-    assert(cb != NULL && cb == this, "");
-    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
-    print();
-    method()->print_codes();
-    print_code();
-    print_pcs();
-  }
-#endif
-  if (cont_offset == 0) {
-    // Let the normal error handling report the exception
-    return NULL;
-  }
-  return code_begin() + cont_offset;
-}
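-
-// A hedged standalone sketch (illustrative only, not the HotSpot table
-// layout) of the lookup above: a fault offset maps to a continuation
-// offset, and 0 means "no entry, let the normal error handling report
-// the exception".
-#include <map>
-
-struct ImplicitExceptionSketch {
-  std::map<int, int> entries;  // fault offset -> continuation offset
-
-  int at(int fault_offset) const {
-    std::map<int, int>::const_iterator it = entries.find(fault_offset);
-    return it == entries.end() ? 0 : it->second;
-  }
-};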
-
-
-
-void nmethod_init() {
-  // make sure you didn't forget to adjust the filler fields
-  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
-}
-
-
-//-------------------------------------------------------------------------------------------
-
-
-// QQQ might we make this work from a frame??
-nmethodLocker::nmethodLocker(address pc) {
-  CodeBlob* cb = CodeCache::find_blob(pc);
-  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
-  _nm = (nmethod*)cb;
-  lock_nmethod(_nm);
-}
-
-// Only JvmtiDeferredEvent::compiled_method_unload_event()
-// should pass zombie_ok == true.
-void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
-  if (nm == NULL)  return;
-  Atomic::inc(&nm->_lock_count);
-  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
-}
-
-void nmethodLocker::unlock_nmethod(nmethod* nm) {
-  if (nm == NULL)  return;
-  Atomic::dec(&nm->_lock_count);
-  guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
-}
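-
-// The pair above is a counted pin, not a mutex: any number of threads may
-// hold it at once, and a positive count tells the sweeper the nmethod is in
-// use. A hedged standalone sketch of the same idea (types and names are
-// illustrative, not HotSpot's):
-#include <atomic>
-#include <cassert>
-#include <cstddef>
-
-struct PinnableSketch {
-  std::atomic<int> lock_count;
-  PinnableSketch() : lock_count(0) {}
-};
-
-class ScopedPin {
-  PinnableSketch* _obj;
-public:
-  explicit ScopedPin(PinnableSketch* obj) : _obj(obj) {
-    if (_obj != NULL) _obj->lock_count.fetch_add(1);  // lock_nmethod analogue
-  }
-  ~ScopedPin() {
-    if (_obj == NULL) return;
-    int prev = _obj->lock_count.fetch_sub(1);         // unlock_nmethod analogue
-    assert(prev > 0 && "unmatched lock/unlock");
-  }
-};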
-
-
-// -----------------------------------------------------------------------------
-// nmethod::get_deopt_original_pc
-//
-// Return the original PC for the given PC if:
-// (a) the given PC belongs to a nmethod and
-// (b) it is a deopt PC
-address nmethod::get_deopt_original_pc(const frame* fr) {
-  if (fr->cb() == NULL)  return NULL;
-
-  nmethod* nm = fr->cb()->as_nmethod_or_null();
-  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
-    return nm->get_original_pc(fr);
-
-  return NULL;
-}
-
-
-// -----------------------------------------------------------------------------
-// MethodHandle
-
-bool nmethod::is_method_handle_return(address return_pc) {
-  if (!has_method_handle_invokes())  return false;
-  PcDesc* pd = pc_desc_at(return_pc);
-  if (pd == NULL)
-    return false;
-  return pd->is_method_handle_invoke();
-}
-
-
-// -----------------------------------------------------------------------------
-// Verification
-
-class VerifyOopsClosure: public OopClosure {
-  nmethod* _nm;
-  bool     _ok;
-public:
-  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
-  bool ok() { return _ok; }
-  virtual void do_oop(oop* p) {
-    if ((*p) == NULL || (*p)->is_oop())  return;
-    if (_ok) {
-      _nm->print_nmethod(true);
-      _ok = false;
-    }
-    tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
-  }
-  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-
-void nmethod::verify() {
-
-  // Hmm: that OSR methods can be deopted but not marked as zombie or
-  // not_entrant seems odd.
-
-  if (is_zombie() || is_not_entrant())
-    return;
-
-  // Make sure all the entry points are correctly aligned for patching.
-  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
-
-  // assert(method()->is_oop(), "must be valid");
-
-  ResourceMark rm;
-
-  if (!CodeCache::contains(this)) {
-    fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
-  }
-
-  if (is_native_method())
-    return;
-
-  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
-  if (nm != this) {
-    fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
-                  this));
-  }
-
-  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
-    if (! p->verify(this)) {
-      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
-    }
-  }
-
-  VerifyOopsClosure voc(this);
-  oops_do(&voc);
-  assert(voc.ok(), "embedded oops must be OK");
-  verify_scavenge_root_oops();
-
-  verify_scopes();
-}
-
-
-void nmethod::verify_interrupt_point(address call_site) {
-  // This code does not work in release mode since
-  // owns_lock is only available in debug mode.
-  CompiledIC* ic = NULL;
-  Thread *cur = Thread::current();
-  if (CompiledIC_lock->owner() == cur ||
-      ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
-       SafepointSynchronize::is_at_safepoint())) {
-    ic = CompiledIC_at(this, call_site);
-    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
-  } else {
-    MutexLocker ml_verify (CompiledIC_lock);
-    ic = CompiledIC_at(this, call_site);
-  }
-
-  PcDesc* pd = pc_desc_at(ic->end_of_call());
-  assert(pd != NULL, "PcDesc must exist");
-  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
-                                     pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
-                                     pd->return_oop());
-       !sd->is_top(); sd = sd->sender()) {
-    sd->verify();
-  }
-}
-
-void nmethod::verify_scopes() {
-  if (!method()) return;        // Runtime stubs have no scope
-  if (method()->is_native()) return; // Ignore stub methods.
-  // Iterate through all interrupt points
-  // and verify that the debug information is valid.
-  RelocIterator iter((nmethod*)this);
-  while (iter.next()) {
-    address stub = NULL;
-    switch (iter.type()) {
-      case relocInfo::virtual_call_type:
-        verify_interrupt_point(iter.addr());
-        break;
-      case relocInfo::opt_virtual_call_type:
-        stub = iter.opt_virtual_call_reloc()->static_stub();
-        verify_interrupt_point(iter.addr());
-        break;
-      case relocInfo::static_call_type:
-        stub = iter.static_call_reloc()->static_stub();
-        //verify_interrupt_point(iter.addr());
-        break;
-      case relocInfo::runtime_call_type:
-        address destination = iter.reloc()->value();
-        // Right now there is no way to find out which entries support
-        // an interrupt point.  It would be nice if we had this
-        // information in a table.
-        break;
-    }
-    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Non-product code
-#ifndef PRODUCT
-
-class DebugScavengeRoot: public OopClosure {
-  nmethod* _nm;
-  bool     _ok;
-public:
-  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
-  bool ok() { return _ok; }
-  virtual void do_oop(oop* p) {
-    if ((*p) == NULL || !(*p)->is_scavengable())  return;
-    if (_ok) {
-      _nm->print_nmethod(true);
-      _ok = false;
-    }
-    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
-    (*p)->print();
-  }
-  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-
-void nmethod::verify_scavenge_root_oops() {
-  if (!on_scavenge_root_list()) {
-    // Actually look inside, to verify the claim that it's clean.
-    DebugScavengeRoot debug_scavenge_root(this);
-    oops_do(&debug_scavenge_root);
-    if (!debug_scavenge_root.ok())
-      fatal("found an unadvertised bad scavengable oop in the code cache");
-  }
-  assert(scavenge_root_not_marked(), "");
-}
-
-#endif // PRODUCT
-
-// Printing operations
-
-void nmethod::print() const {
-  ResourceMark rm;
-  ttyLocker ttyl;   // keep the following output all in one block
-
-  tty->print("Compiled method ");
-
-  if (is_compiled_by_c1()) {
-    tty->print("(c1) ");
-  } else if (is_compiled_by_c2()) {
-    tty->print("(c2) ");
-  } else if (is_compiled_by_shark()) {
-    tty->print("(shark) ");
-  } else if (is_compiled_by_graal()) {
-    tty->print("(Graal) ");
-  } else {
-    tty->print("(nm) ");
-  }
-
-  print_on(tty, NULL);
-
-  if (WizardMode) {
-    tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
-    tty->print(" for method " INTPTR_FORMAT , (address)method());
-    tty->print(" { ");
-    if (is_in_use())      tty->print("in_use ");
-    if (is_not_entrant()) tty->print("not_entrant ");
-    if (is_zombie())      tty->print("zombie ");
-    if (is_unloaded())    tty->print("unloaded ");
-    if (on_scavenge_root_list())  tty->print("scavenge_root ");
-    tty->print_cr("}:");
-  }
-  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              (address)this,
-                                              (address)this + size(),
-                                              size());
-  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              relocation_begin(),
-                                              relocation_end(),
-                                              relocation_size());
-  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              consts_begin(),
-                                              consts_end(),
-                                              consts_size());
-  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              insts_begin(),
-                                              insts_end(),
-                                              insts_size());
-  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              stub_begin(),
-                                              stub_end(),
-                                              stub_size());
-  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              oops_begin(),
-                                              oops_end(),
-                                              oops_size());
-  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              metadata_begin(),
-                                              metadata_end(),
-                                              metadata_size());
-  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              scopes_data_begin(),
-                                              scopes_data_end(),
-                                              scopes_data_size());
-  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              scopes_pcs_begin(),
-                                              scopes_pcs_end(),
-                                              scopes_pcs_size());
-  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              dependencies_begin(),
-                                              dependencies_end(),
-                                              dependencies_size());
-  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              handler_table_begin(),
-                                              handler_table_end(),
-                                              handler_table_size());
-  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
-                                              nul_chk_table_begin(),
-                                              nul_chk_table_end(),
-                                              nul_chk_table_size());
-}
-
-void nmethod::print_code() {
-  HandleMark hm;
-  ResourceMark m;
-  Disassembler::decode(this);
-}
-
-
-#ifndef PRODUCT
-
-void nmethod::print_scopes() {
-  // Find the first pc desc for all scopes in the code and print it.
-  ResourceMark rm;
-  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
-    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
-      continue;
-
-    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
-    sd->print_on(tty, p);
-  }
-}
-
-void nmethod::print_dependencies() {
-  ResourceMark rm;
-  ttyLocker ttyl;   // keep the following output all in one block
-  tty->print_cr("Dependencies:");
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    deps.print_dependency();
-    Klass* ctxk = deps.context_type();
-    if (ctxk != NULL) {
-      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
-        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
-      }
-    }
-    deps.log_dependency();  // put it into the xml log also
-  }
-}
-
-
-void nmethod::print_relocations() {
-  ResourceMark m;       // in case methods get printed via the debugger
-  tty->print_cr("relocations:");
-  RelocIterator iter(this);
-  iter.print();
-  if (UseRelocIndex) {
-    jint* index_end   = (jint*)relocation_end() - 1;
-    jint  index_size  = *index_end;
-    jint* index_start = (jint*)( (address)index_end - index_size );
-    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
-    if (index_size > 0) {
-      jint* ip;
-      for (ip = index_start; ip+2 <= index_end; ip += 2)
-        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
-                      ip[0],
-                      ip[1],
-                      header_end()+ip[0],
-                      relocation_begin()-1+ip[1]);
-      for (; ip < index_end; ip++)
-        tty->print_cr("  (%d ?)", ip[0]);
-      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
-      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
-    }
-  }
-}
-
-
-void nmethod::print_pcs() {
-  ResourceMark m;       // in case methods get printed via debugger
-  tty->print_cr("pc-bytecode offsets:");
-  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
-    p->print(this);
-  }
-}
-
-#endif // PRODUCT
-
-const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
-  RelocIterator iter(this, begin, end);
-  bool have_one = false;
-  while (iter.next()) {
-    have_one = true;
-    switch (iter.type()) {
-        case relocInfo::none:                  return "no_reloc";
-        case relocInfo::oop_type: {
-          stringStream st;
-          oop_Relocation* r = iter.oop_reloc();
-          oop obj = r->oop_value();
-          st.print("oop(");
-          if (obj == NULL) st.print("NULL");
-          else obj->print_value_on(&st);
-          st.print(")");
-          return st.as_string();
-        }
-        case relocInfo::metadata_type: {
-          stringStream st;
-          metadata_Relocation* r = iter.metadata_reloc();
-          Metadata* obj = r->metadata_value();
-          st.print("metadata(");
-          if (obj == NULL) st.print("NULL");
-          else obj->print_value_on(&st);
-          st.print(")");
-          return st.as_string();
-        }
-        case relocInfo::virtual_call_type:     return "virtual_call";
-        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
-        case relocInfo::static_call_type:      return "static_call";
-        case relocInfo::static_stub_type:      return "static_stub";
-        case relocInfo::runtime_call_type:     return "runtime_call";
-        case relocInfo::external_word_type:    return "external_word";
-        case relocInfo::internal_word_type:    return "internal_word";
-        case relocInfo::section_word_type:     return "section_word";
-        case relocInfo::poll_type:             return "poll";
-        case relocInfo::poll_return_type:      return "poll_return";
-        case relocInfo::type_mask:             return "type_bit_mask";
-    }
-  }
-  return have_one ? "other" : NULL;
-}
-
-// Return the last scope in (begin..end]
-ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
-  PcDesc* p = pc_desc_near(begin+1);
-  if (p != NULL && p->real_pc(this) <= end) {
-    return new ScopeDesc(this, p->scope_decode_offset(),
-                         p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(),
-                         p->return_oop());
-  }
-  return NULL;
-}
-
-void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
-  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
-  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
-  if (GRAAL_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
-  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
-  if (GRAAL_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
-
-  if (has_method_handle_invokes())
-    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
-
-  if (block_begin == consts_begin())            stream->print_cr("[Constants]");
-
-  if (block_begin == entry_point()) {
-    methodHandle m = method();
-    if (m.not_null()) {
-      stream->print("  # ");
-      m->print_value_on(stream);
-      stream->cr();
-    }
-    if (m.not_null() && !is_osr_method()) {
-      ResourceMark rm;
-      int sizeargs = m->size_of_parameters();
-      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
-      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
-      {
-        int sig_index = 0;
-        if (!m->is_static())
-          sig_bt[sig_index++] = T_OBJECT; // 'this'
-        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
-          BasicType t = ss.type();
-          sig_bt[sig_index++] = t;
-          if (type2size[t] == 2) {
-            sig_bt[sig_index++] = T_VOID;
-          } else {
-            assert(type2size[t] == 1, "size is 1 or 2");
-          }
-        }
-        assert(sig_index == sizeargs, "");
-      }
-      const char* spname = "sp"; // make arch-specific?
-      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
-      int stack_slot_offset = this->frame_size() * wordSize;
-      int tab1 = 14, tab2 = 24;
-      int sig_index = 0;
-      int arg_index = (m->is_static() ? 0 : -1);
-      bool did_old_sp = false;
-      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
-        bool at_this = (arg_index == -1);
-        bool at_old_sp = false;
-        BasicType t = (at_this ? T_OBJECT : ss.type());
-        assert(t == sig_bt[sig_index], "sigs in sync");
-        if (at_this)
-          stream->print("  # this: ");
-        else
-          stream->print("  # parm%d: ", arg_index);
-        stream->move_to(tab1);
-        VMReg fst = regs[sig_index].first();
-        VMReg snd = regs[sig_index].second();
-        if (fst->is_reg()) {
-          stream->print("%s", fst->name());
-          if (snd->is_valid())  {
-            stream->print(":%s", snd->name());
-          }
-        } else if (fst->is_stack()) {
-          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
-          if (offset == stack_slot_offset)  at_old_sp = true;
-          stream->print("[%s+0x%x]", spname, offset);
-        } else {
-          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
-        }
-        stream->print(" ");
-        stream->move_to(tab2);
-        stream->print("= ");
-        if (at_this) {
-          m->method_holder()->print_value_on(stream);
-        } else {
-          bool did_name = false;
-          if (!at_this && ss.is_object()) {
-            Symbol* name = ss.as_symbol_or_null();
-            if (name != NULL) {
-              name->print_value_on(stream);
-              did_name = true;
-            }
-          }
-          if (!did_name)
-            stream->print("%s", type2name(t));
-        }
-        if (at_old_sp) {
-          stream->print("  (%s of caller)", spname);
-          did_old_sp = true;
-        }
-        stream->cr();
-        sig_index += type2size[t];
-        arg_index += 1;
-        if (!at_this)  ss.next();
-      }
-      if (!did_old_sp) {
-        stream->print("  # ");
-        stream->move_to(tab1);
-        stream->print("[%s+0x%x]", spname, stack_slot_offset);
-        stream->print("  (%s of caller)", spname);
-        stream->cr();
-      }
-    }
-  }
-}
-
-void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
-  // First, find an oopmap in (begin, end].
-  // We use the odd half-closed interval so that oop maps and scope descs
-  // which are tied to the byte after a call are printed with the call itself.
-  address base = code_begin();
-  OopMapSet* oms = oop_maps();
-  if (oms != NULL) {
-    for (int i = 0, imax = oms->size(); i < imax; i++) {
-      OopMap* om = oms->at(i);
-      address pc = base + om->offset();
-      if (pc > begin) {
-        if (pc <= end) {
-          st->move_to(column);
-          st->print("; ");
-          om->print_on(st);
-        }
-        break;
-      }
-    }
-  }
-
-  // Print any debug info present at this pc.
-  ScopeDesc* sd  = scope_desc_in(begin, end);
-  if (sd != NULL) {
-    st->move_to(column);
-    if (sd->bci() == SynchronizationEntryBCI) {
-      st->print(";*synchronization entry");
-    } else {
-      if (sd->method() == NULL) {
-        st->print("method is NULL");
-      } else if (sd->method()->is_native()) {
-        st->print("method is native");
-      } else {
-        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
-        st->print(";*%s", Bytecodes::name(bc));
-        switch (bc) {
-        case Bytecodes::_invokevirtual:
-        case Bytecodes::_invokespecial:
-        case Bytecodes::_invokestatic:
-        case Bytecodes::_invokeinterface:
-          {
-            Bytecode_invoke invoke(sd->method(), sd->bci());
-            st->print(" ");
-            if (invoke.name() != NULL)
-              invoke.name()->print_symbol_on(st);
-            else
-              st->print("<UNKNOWN>");
-            break;
-          }
-        case Bytecodes::_getfield:
-        case Bytecodes::_putfield:
-        case Bytecodes::_getstatic:
-        case Bytecodes::_putstatic:
-          {
-            Bytecode_field field(sd->method(), sd->bci());
-            st->print(" ");
-            if (field.name() != NULL)
-              field.name()->print_symbol_on(st);
-            else
-              st->print("<UNKNOWN>");
-          }
-        }
-      }
-    }
-
-    // Print all scopes
-    for (;sd != NULL; sd = sd->sender()) {
-      st->move_to(column);
-      st->print("; -");
-      if (sd->method() == NULL) {
-        st->print("method is NULL");
-      } else {
-        sd->method()->print_short_name(st);
-      }
-      int lineno = sd->method()->line_number_from_bci(sd->bci());
-      if (lineno != -1) {
-        st->print("@%d (line %d)", sd->bci(), lineno);
-      } else {
-        st->print("@%d", sd->bci());
-      }
-      st->cr();
-    }
-  }
-
-  // Print relocation information
-  const char* str = reloc_string_for(begin, end);
-  if (str != NULL) {
-    if (sd != NULL) st->cr();
-    st->move_to(column);
-    st->print(";   {%s}", str);
-  }
-  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
-  if (cont_offset != 0) {
-    st->move_to(column);
-    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
-  }
-
-}
-
-#ifndef PRODUCT
-
-void nmethod::print_value_on(outputStream* st) const {
-  st->print("nmethod");
-  print_on(st, NULL);
-}
-
-void nmethod::print_calls(outputStream* st) {
-  RelocIterator iter(this);
-  while (iter.next()) {
-    switch (iter.type()) {
-    case relocInfo::virtual_call_type:
-    case relocInfo::opt_virtual_call_type: {
-      VerifyMutexLocker mc(CompiledIC_lock);
-      CompiledIC_at(iter.reloc())->print();
-      break;
-    }
-    case relocInfo::static_call_type:
-      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
-      compiledStaticCall_at(iter.reloc())->print();
-      break;
-    }
-  }
-}
-
-void nmethod::print_handler_table() {
-  ExceptionHandlerTable(this).print();
-}
-
-void nmethod::print_nul_chk_table() {
-  ImplicitExceptionTable(this).print(code_begin());
-}
-
-void nmethod::print_statistics() {
-  ttyLocker ttyl;
-  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
-  nmethod_stats.print_native_nmethod_stats();
-  nmethod_stats.print_nmethod_stats();
-  DebugInformationRecorder::print_statistics();
-  nmethod_stats.print_pc_stats();
-  Dependencies::print_statistics();
-  if (xtty != NULL)  xtty->tail("statistics");
-}
-
-#endif // PRODUCT
+}
+
+
+nmethod::nmethod(
+  Method* method,
+  int nmethod_size,
+  int compile_id,
+  int entry_bci,
+  CodeOffsets* offsets,
+  int orig_pc_offset,
+  DebugInformationRecorder* debug_info,
+  Dependencies* dependencies,
+  CodeBuffer *code_buffer,
+  int frame_size,
+  OopMapSet* oop_maps,
+  ExceptionHandlerTable* handler_table,
+  ImplicitExceptionTable* nul_chk_table,
+  AbstractCompiler* compiler,
+  int comp_level,
+  GrowableArray<jlong>* leaf_graph_ids
+#ifdef GRAAL
+  , Handle installed_code,
+  Handle triggered_deoptimizations
+#endif
+  )
+  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
+             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
+  _native_receiver_sp_offset(in_ByteSize(-1)),
+  _native_basic_lock_sp_offset(in_ByteSize(-1))
+{
+  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
+  {
+    debug_only(No_Safepoint_Verifier nsv;)
+    assert_locked_or_safepoint(CodeCache_lock);
+
+    init_defaults();
+    _method                  = method;
+    _entry_bci               = entry_bci;
+    _compile_id              = compile_id;
+    _comp_level              = comp_level;
+    _compiler                = compiler;
+    _orig_pc_offset          = orig_pc_offset;
+
+    // Section offsets
+    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
+    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
+
+#ifdef GRAAL
+    _graal_installed_code = installed_code();
+    _triggered_deoptimizations = (typeArrayOop)triggered_deoptimizations();
+#endif
+    if (compiler->is_graal()) {
+      // Graal might not produce any stub sections
+      if (offsets->value(CodeOffsets::Exceptions) != -1) {
+        _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
+      } else {
+        _exception_offset = -1;
+      }
+      if (offsets->value(CodeOffsets::Deopt) != -1) {
+        _deoptimize_offset       = code_offset()          + offsets->value(CodeOffsets::Deopt);
+      } else {
+        _deoptimize_offset = -1;
+      }
+      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
+        _deoptimize_mh_offset  = code_offset()          + offsets->value(CodeOffsets::DeoptMH);
+      } else {
+        _deoptimize_mh_offset  = -1;
+      }
+    } else {
+      // Exception handler and deopt handler are in the stub section
+      assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
+      assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
+
+      _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
+      _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
+      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
+        _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
+      } else {
+        _deoptimize_mh_offset  = -1;
+      }
+    }
+    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
+      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
+    } else {
+      _unwind_handler_offset = -1;
+    }
+
+    int leaf_graph_ids_size = leaf_graph_ids == NULL ? 0 : round_to(sizeof(jlong) * leaf_graph_ids->length(), oopSize);
+
+    _oops_offset             = data_offset();
+    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);
+
+    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
+    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
+    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
+    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
+    _leaf_graph_ids_offset   = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
+    _nmethod_end_offset      = _leaf_graph_ids_offset + leaf_graph_ids_size;
+
+    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
+    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
+    _exception_cache         = NULL;
+    _pc_desc_cache.reset_to(scopes_pcs_begin());
+
+    // Copy contents of ScopeDescRecorder to nmethod
+    code_buffer->copy_values_to(this);
+    debug_info->copy_to(this);
+    dependencies->copy_to(this);
+    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
+      CodeCache::add_scavenge_root_nmethod(this);
+    }
+    debug_only(verify_scavenge_root_oops());
+
+    CodeCache::commit(this);
+
+    // Copy contents of ExceptionHandlerTable to nmethod
+    handler_table->copy_to(this);
+    nul_chk_table->copy_to(this);
+
+    if (leaf_graph_ids != NULL && leaf_graph_ids_size > 0) {
+      memcpy(leaf_graph_ids_begin(), leaf_graph_ids->adr_at(0), leaf_graph_ids_size);
+    }
+
+    // We use the entry point information to find out whether a method is
+    // static or non-static.
+    assert(compiler->is_c2() ||
+           _method->is_static() == (entry_point() == _verified_entry_point),
+           " entry points must be same for static methods and vice versa");
+  }
+
+  bool printnmethods = PrintNMethods
+    || CompilerOracle::should_print(_method)
+    || CompilerOracle::has_option_string(_method, "PrintNMethods");
+  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
+    print_nmethod(printnmethods);
+  }
+}
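+
+// Every section offset above follows one rule: previous offset plus the
+// previous section's size rounded up to an alignment unit. A hedged
+// standalone sketch with illustrative sizes (not the real section sizes):
+#include <cstdio>
+
+static int round_to_unit(int x, int unit) {
+  return (x + unit - 1) / unit * unit;  // round x up to a multiple of unit
+}
+
+int main() {
+  const int word = 8;
+  int oops_offset     = 0;                                         // data_offset() analogue
+  int metadata_offset = oops_offset     + round_to_unit(20, word); // 0 + 24 = 24
+  int scopes_offset   = metadata_offset + round_to_unit(10, word); // 24 + 16 = 40
+  std::printf("%d %d %d\n", oops_offset, metadata_offset, scopes_offset);
+  return 0;
+}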
+
+
+// Print a short set of xml attributes to identify this nmethod.  The
+// output should be embedded in some other element.
+void nmethod::log_identity(xmlStream* log) const {
+  log->print(" compile_id='%d'", compile_id());
+  const char* nm_kind = compile_kind();
+  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
+  if (compiler() != NULL) {
+    log->print(" compiler='%s'", compiler()->name());
+  }
+  if (TieredCompilation) {
+    log->print(" level='%d'", comp_level());
+  }
+}
+
+
+#define LOG_OFFSET(log, name)                    \
+  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
+    log->print(" " XSTR(name) "_offset='%d'"    , \
+               (intptr_t)name##_begin() - (intptr_t)this)
+
+
+void nmethod::log_new_nmethod() const {
+  if (LogCompilation && xtty != NULL) {
+    ttyLocker ttyl;
+    HandleMark hm;
+    xtty->begin_elem("nmethod");
+    log_identity(xtty);
+    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
+    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+
+    LOG_OFFSET(xtty, relocation);
+    LOG_OFFSET(xtty, consts);
+    LOG_OFFSET(xtty, insts);
+    LOG_OFFSET(xtty, stub);
+    LOG_OFFSET(xtty, scopes_data);
+    LOG_OFFSET(xtty, scopes_pcs);
+    LOG_OFFSET(xtty, dependencies);
+    LOG_OFFSET(xtty, handler_table);
+    LOG_OFFSET(xtty, nul_chk_table);
+    LOG_OFFSET(xtty, oops);
+
+    xtty->method(method());
+    xtty->stamp();
+    xtty->end_elem();
+  }
+}
+
+#undef LOG_OFFSET
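+
+// LOG_OFFSET above leans on two preprocessor tricks: XSTR(name) turns the
+// macro argument into a string literal, and name##_begin pastes it into an
+// identifier. A hedged standalone illustration (not the HotSpot macros):
+#include <cstdio>
+
+#define STR_(s)  #s
+#define XSTR_(s) STR_(s)
+#define PRINT_SIZE(obj, name) \
+  std::printf(XSTR_(name) "_size=%d\n", (obj).name##_end - (obj).name##_begin)
+
+struct BlobSketch { int stub_begin; int stub_end; };
+
+int main() {
+  BlobSketch b = { 100, 160 };
+  PRINT_SIZE(b, stub);  // pastes to b.stub_end - b.stub_begin and prints "stub_size=60"
+  return 0;
+}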
+
+
+// Print out more verbose output usually for a newly created nmethod.
+void nmethod::print_on(outputStream* st, const char* msg) const {
+  if (st != NULL) {
+    ttyLocker ttyl;
+    if (WizardMode) {
+      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
+      st->print_cr(" (" INTPTR_FORMAT ")", this);
+    } else {
+      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
+    }
+  }
+}
+
+
+void nmethod::print_nmethod(bool printmethod) {
+  ttyLocker ttyl;  // keep the following output all in one block
+  if (xtty != NULL) {
+    xtty->begin_head("print_nmethod");
+    xtty->stamp();
+    xtty->end_head();
+  }
+  // print the header part first
+  print();
+  // then print the requested information
+  if (printmethod) {
+    print_code();
+    print_pcs();
+    if (oop_maps()) {
+      oop_maps()->print();
+    }
+  }
+  if (PrintDebugInfo) {
+    print_scopes();
+  }
+  if (PrintRelocations) {
+    print_relocations();
+  }
+  if (PrintDependencies) {
+    print_dependencies();
+  }
+  if (PrintExceptionHandlers) {
+    print_handler_table();
+    print_nul_chk_table();
+  }
+  if (xtty != NULL) {
+    xtty->tail("print_nmethod");
+  }
+}
+
+
+// Promote one word from an assembly-time handle to a live embedded oop.
+inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
+  if (handle == NULL ||
+      // As a special case, IC oops are initialized to 1 or -1.
+      handle == (jobject) Universe::non_oop_word()) {
+    (*dest) = (oop) handle;
+  } else {
+    (*dest) = JNIHandles::resolve_non_null(handle);
+  }
+}
+
+
+// Have to have the same name because it's called by a template
+void nmethod::copy_values(GrowableArray<jobject>* array) {
+  int length = array->length();
+  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
+  oop* dest = oops_begin();
+  for (int index = 0 ; index < length; index++) {
+    initialize_immediate_oop(&dest[index], array->at(index));
+  }
+
+  // Now we can fix up all the oops in the code.  We need to do this
+  // in the code because the assembler uses jobjects as placeholders.
+  // The code and relocations have already been initialized by the
+  // CodeBlob constructor, so it is valid even at this early point to
+  // iterate over relocations and patch the code.
+  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
+}
+
+void nmethod::copy_values(GrowableArray<Metadata*>* array) {
+  int length = array->length();
+  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
+  Metadata** dest = metadata_begin();
+  for (int index = 0 ; index < length; index++) {
+    dest[index] = array->at(index);
+  }
+}
+
+bool nmethod::is_at_poll_return(address pc) {
+  RelocIterator iter(this, pc, pc+1);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::poll_return_type)
+      return true;
+  }
+  return false;
+}
+
+
+bool nmethod::is_at_poll_or_poll_return(address pc) {
+  RelocIterator iter(this, pc, pc+1);
+  while (iter.next()) {
+    relocInfo::relocType t = iter.type();
+    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
+      return true;
+  }
+  return false;
+}
+
+
+void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
+  // re-patch all oop-bearing instructions, just in case some oops moved
+  RelocIterator iter(this, begin, end);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::oop_type) {
+      oop_Relocation* reloc = iter.oop_reloc();
+      if (initialize_immediates && reloc->oop_is_immediate()) {
+        oop* dest = reloc->oop_addr();
+        initialize_immediate_oop(dest, (jobject) *dest);
+      }
+      // Refresh the oop-related bits of this instruction.
+      reloc->fix_oop_relocation();
+    } else if (iter.type() == relocInfo::metadata_type) {
+      metadata_Relocation* reloc = iter.metadata_reloc();
+      reloc->fix_metadata_relocation();
+    }
+
+    // There must not be any interfering patches or breakpoints.
+    assert(!(iter.type() == relocInfo::breakpoint_type
+             && iter.breakpoint_reloc()->active()),
+           "no active breakpoint");
+  }
+}
+
+
+void nmethod::verify_oop_relocations() {
+  // Ensure that the code matches the current oop values
+  RelocIterator iter(this, NULL, NULL);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::oop_type) {
+      oop_Relocation* reloc = iter.oop_reloc();
+      if (!reloc->oop_is_immediate()) {
+        reloc->verify_oop_relocation();
+      }
+    }
+  }
+}
+
+
+ScopeDesc* nmethod::scope_desc_at(address pc) {
+  PcDesc* pd = pc_desc_at(pc);
+  guarantee(pd != NULL, "scope must be present");
+  return new ScopeDesc(this, pd->scope_decode_offset(),
+                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
+                       pd->return_oop());
+}
+
+
+void nmethod::clear_inline_caches() {
+  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
+  if (is_zombie()) {
+    return;
+  }
+
+  RelocIterator iter(this);
+  while (iter.next()) {
+    iter.reloc()->clear_inline_cache();
+  }
+}
+
+
+void nmethod::cleanup_inline_caches() {
+
+  assert_locked_or_safepoint(CompiledIC_lock);
+
+  // If the method is not entrant or zombie then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (!is_in_use()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // This means that the low_boundary is going to be a little too high.
+    // This shouldn't matter, since oops of non-entrant methods are never used.
+    // In fact, why are we bothering to look at oops in a non-entrant method??
+  }
+
+  // Find all calls in an nmethod, and clear the ones that point to zombie methods
+  ResourceMark rm;
+  RelocIterator iter(this, low_boundary);
+  while (iter.next()) {
+    switch (iter.type()) {
+      case relocInfo::virtual_call_type:
+      case relocInfo::opt_virtual_call_type: {
+        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        // It is OK to look up references to zombies here
+        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
+        if (cb != NULL && cb->is_nmethod()) {
+          nmethod* nm = (nmethod*)cb;
+          // Clean inline caches pointing to both zombie and not_entrant methods
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
+        }
+        break;
+      }
+      case relocInfo::static_call_type: {
+        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
+        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
+        if (cb != NULL && cb->is_nmethod()) {
+          nmethod* nm = (nmethod*)cb;
+          // Clean inline caches pointing to both zombie and not_entrant methods
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
+        }
+        break;
+      }
+    }
+  }
+}
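+
+// The rule applied twice above: a call-site cache is reset whenever its
+// target is no longer the current, in-use code for its method. A hedged
+// standalone sketch (types and names are illustrative, not HotSpot's):
+#include <cstddef>
+
+struct CodeSketch {
+  bool in_use;                     // stand-in for nm->is_in_use()
+  const CodeSketch* methods_code;  // stand-in for nm->method()->code()
+};
+
+struct CallSiteSketch {
+  const CodeSketch* dest;          // stand-in for the IC/static-call target
+
+  void set_to_clean() { dest = NULL; }
+
+  void clean_if_stale() {
+    // Mirrors: if (!nm->is_in_use() || (nm->method()->code() != nm)) set_to_clean();
+    if (dest != NULL && (!dest->in_use || dest->methods_code != dest)) {
+      set_to_clean();
+    }
+  }
+};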
+
+// This is a private interface with the sweeper.
+void nmethod::mark_as_seen_on_stack() {
+  assert(is_not_entrant(), "must be a non-entrant method");
+  // Set the traversal mark to ensure that the sweeper does 2
+  // cleaning passes before moving to zombie.
+  set_stack_traversal_mark(NMethodSweeper::traversal_count());
+}
+
+// Tell if a non-entrant method can be converted to a zombie (i.e.,
+// there are no activations on the stack, not in use by the VM,
+// and not in use by the ServiceThread)
+bool nmethod::can_not_entrant_be_converted() {
+  assert(is_not_entrant(), "must be a non-entrant method");
+
+  // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
+  // count can be greater than the stack traversal count before it hits the
+  // nmethod for the second time.
+  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
+         !is_locked_by_vm();
+}
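+
+// The arithmetic above encodes "two full cleaning passes": a method marked
+// at sweep N may only become a zombie once the sweeper's traversal count
+// has advanced past N + 1. A hedged standalone sketch with illustrative
+// values:
+#include <cstdio>
+
+static bool sketch_can_convert(long mark, long traversal_count, bool locked_by_vm) {
+  // Mirrors: stack_traversal_mark() + 1 < traversal_count() && !is_locked_by_vm()
+  return mark + 1 < traversal_count && !locked_by_vm;
+}
+
+int main() {
+  std::printf("%d\n", sketch_can_convert(9, 10, false));  // 0: only one pass since the mark
+  std::printf("%d\n", sketch_can_convert(8, 10, false));  // 1: two passes have completed
+  return 0;
+}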
+
+void nmethod::inc_decompile_count() {
+  if (!is_compiled_by_c2() && !is_compiled_by_graal()) return;
+  // Could be gated by ProfileTraps, but do not bother...
+  Method* m = method();
+  if (m == NULL)  return;
+  MethodData* mdo = m->method_data();
+  if (mdo == NULL)  return;
+  // There is a benign race here.  See comments in methodData.hpp.
+  mdo->inc_decompile_count();
+}
+
+void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
+
+  post_compiled_method_unload();
+
+  // Since this nmethod is being unloaded, make sure that dependencies
+  // recorded in instanceKlasses get flushed and pass non-NULL closure to
+  // indicate that this work is being done during a GC.
+  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
+  assert(is_alive != NULL, "Should be non-NULL");
+  // A non-NULL is_alive closure indicates that this is being called during GC.
+  flush_dependencies(is_alive);
+
+  // Break cycle between nmethod & method
+  if (TraceClassUnloading && WizardMode) {
+    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
+                  " unloadable], Method*(" INTPTR_FORMAT
+                  "), cause(" INTPTR_FORMAT ")",
+                  this, (address)_method, (address)cause);
+    if (!Universe::heap()->is_gc_active())
+      cause->klass()->print();
+  }
+  // Unlink the osr method, so we do not look this up again
+  if (is_osr_method()) {
+    invalidate_osr_method();
+  }
+  // If _method is already NULL the Method* is about to be unloaded,
+  // so we don't have to break the cycle. Note that it is possible to
+  // have the Method* live here, in case we unload the nmethod because
+  // it is pointing to some oop (other than the Method*) being unloaded.
+  if (_method != NULL) {
+    // OSR methods point to the Method*, but the Method* does not
+    // point back!
+    if (_method->code() == this) {
+      _method->clear_code(); // Break a cycle
+    }
+    _method = NULL;            // Clear the method of this dead nmethod
+  }
+
+#ifdef GRAAL
+  // The method can only be unloaded after the pointer to the installed code
+  // Java wrapper is no longer alive. Here we need to clear out this weak
+  // reference to the dead object.
+  if (_graal_installed_code != NULL) {
+    _graal_installed_code = NULL;
+  }
+#endif
+
+  // Make the nmethod unloaded - i.e., change state and notify sweeper
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  if (is_in_use()) {
+    // Transitioning directly from live to unloaded -- so
+    // we need to force a cache clean-up; remember this
+    // for later on.
+    CodeCache::set_needs_cache_clean(true);
+  }
+  _state = unloaded;
+
+  // Log the unloading.
+  log_state_change();
+
+  // The Method* is gone at this point
+  assert(_method == NULL, "Tautology");
+
+  set_osr_link(NULL);
+  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
+  NMethodSweeper::notify(this);
+}
+
+void nmethod::invalidate_osr_method() {
+  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
+  // Remove from list of active nmethods
+  if (method() != NULL)
+    method()->method_holder()->remove_osr_nmethod(this);
+  // Set entry as invalid
+  _entry_bci = InvalidOSREntryBci;
+}
+
+void nmethod::log_state_change() const {
+  if (LogCompilation) {
+    if (xtty != NULL) {
+      ttyLocker ttyl;  // keep the following output all in one block
+      if (_state == unloaded) {
+        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
+                         os::current_thread_id());
+      } else {
+        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
+                         os::current_thread_id(),
+                         (_state == zombie ? " zombie='1'" : ""));
+      }
+      log_identity(xtty);
+      xtty->stamp();
+      xtty->end_elem();
+    }
+  }
+  if (PrintCompilation && _state != unloaded) {
+    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
+  }
+}
+
+// Common functionality for both make_not_entrant and make_zombie
+bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
+  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
+  assert(!is_zombie(), "should not already be a zombie");
+
+  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
+  nmethodLocker nml(this);
+  methodHandle the_method(method());
+  No_Safepoint_Verifier nsv;
+
+  {
+    // invalidate osr nmethod before acquiring the patching lock since
+    // they both acquire leaf locks and we don't want a deadlock.
+    // This logic is equivalent to the logic below for patching the
+    // verified entry point of regular methods.
+    if (is_osr_method()) {
+      // this effectively makes the osr nmethod not entrant
+      invalidate_osr_method();
+    }
+
+    // Enter critical section.  Does not block for safepoint.
+    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+
+    if (_state == state) {
+      // another thread already performed this transition so nothing
+      // to do, but return false to indicate this.
+      return false;
+    }
+
+    // The caller can be calling the method statically or through an inline
+    // cache call.
+    if (!is_osr_method() && !is_not_entrant()) {
+      address stub = SharedRuntime::get_handle_wrong_method_stub();
+#ifdef GRAAL
+      if (_graal_installed_code != NULL && !HotSpotNmethod::isDefault(_graal_installed_code)) {
+        // This was manually installed machine code. Patch entry with stub that throws an exception.
+        stub = SharedRuntime::get_deoptimized_installed_code_stub();
+      }
+#endif
+      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), stub);
+    }
+
+    if (is_in_use()) {
+      // It's a true state change, so mark the method as decompiled.
+      // Do it only for transition from alive.
+      inc_decompile_count();
+    }
+
+    // Change state
+    _state = state;
+
+    // Log the transition once
+    log_state_change();
+
+    // Remove nmethod from method.
+    // We need to check if both the _code and _from_compiled_code_entry_point
+    // refer to this nmethod because there is a race in setting these two fields
+    // in Method* as seen in bugid 4947125.
+    // If the vep() points to the zombie nmethod, the memory for the nmethod
+    // could be flushed and the compiler and vtable stubs could still call
+    // through it.
+    if (method() != NULL && (method()->code() == this ||
+                             method()->from_compiled_entry() == verified_entry_point())) {
+      HandleMark hm;
+      method()->clear_code();
+    }
+
+    if (state == not_entrant) {
+      mark_as_seen_on_stack();
+    }
+
+  } // leave critical region under Patching_lock
+
+  // When the nmethod becomes zombie it is no longer alive so the
+  // dependencies must be flushed.  nmethods in the not_entrant
+  // state will be flushed later when the transition to zombie
+  // happens or they get unloaded.
+  if (state == zombie) {
+    {
+      // Flushing dependencies must be done before any possible
+      // safepoint can sneak in, otherwise the oops used by the
+      // dependency logic could have become stale.
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      flush_dependencies(NULL);
+    }
+
+    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
+    // event and it hasn't already been reported for this nmethod then
+    // report it now. (The event may have been reported earlier if the GC
+    // marked it for unloading.) JvmtiDeferredEventQueue support means
+    // we no longer go to a safepoint here.
+    post_compiled_method_unload();
+
+#ifdef ASSERT
+    // It's no longer safe to access the oops section since zombie
+    // nmethods aren't scanned for GC.
+    _oops_are_stale = true;
+#endif
+  } else {
+    assert(state == not_entrant, "other cases may need to be handled differently");
+  }
+
+  if (TraceCreateZombies) {
+    ResourceMark m;
+    tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", this, this->method()->name_and_sig_as_C_string(), (state == not_entrant) ? "not entrant" : "zombie");
+  }
+
+  // Make sweeper aware that there is a zombie method that needs to be removed
+  NMethodSweeper::notify(this);
+
+  return true;
+}
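+
+// The _state == state check under the Patching_lock is what resolves the
+// race: only the thread that actually performs the transition returns true.
+// A hedged standalone sketch using std::mutex in place of the Patching_lock
+// (names and types are illustrative, not HotSpot's):
+#include <mutex>
+
+enum SketchState { sketch_in_use, sketch_not_entrant, sketch_zombie };
+
+struct MethodSketch {
+  SketchState state;
+  std::mutex patching_lock;
+  MethodSketch() : state(sketch_in_use) {}
+
+  // Returns false if another thread already performed this transition.
+  bool transition_to(SketchState target) {
+    std::lock_guard<std::mutex> guard(patching_lock);
+    if (state == target) return false;
+    state = target;  // the winning thread changes state and reports success
+    return true;
+  }
+};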
+
+void nmethod::flush() {
+  // Note that there are no valid oops in the nmethod anymore.
+  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
+  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
+
+  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  // completely deallocate this method
+  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
+  if (PrintMethodFlushing) {
+    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
+        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
+  }
+
+  // We need to deallocate any ExceptionCache data.
+  // Note that we do not need to grab the nmethod lock for this, it
+  // better be thread safe if we're disposing of it!
+  ExceptionCache* ec = exception_cache();
+  set_exception_cache(NULL);
+  while (ec != NULL) {
+    ExceptionCache* next = ec->next();
+    delete ec;
+    ec = next;
+  }
+
+  if (on_scavenge_root_list()) {
+    CodeCache::drop_scavenge_root_nmethod(this);
+  }
+
+  if (is_speculatively_disconnected()) {
+    CodeCache::remove_saved_code(this);
+  }
+
+#ifdef SHARK
+  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
+#endif // SHARK
+
+  ((CodeBlob*)(this))->flush();
+
+  CodeCache::free(this);
+}
+
+
+//
+// Notify all classes this nmethod is dependent on that it is no
+// longer dependent. This should only be called in two situations.
+// First, when a nmethod transitions to a zombie all dependents need
+// to be cleared.  Since zombification happens at a safepoint there's no
+// synchronization issues.  The second place is a little more tricky.
+// During phase 1 of mark sweep class unloading may happen and as a
+// result some nmethods may get unloaded.  In this case the flushing
+// of dependencies must happen during phase 1 since after GC any
+// dependencies in the unloaded nmethod won't be updated, so
+// traversing the dependency information is unsafe.  In that case this
+// function is called with a non-NULL argument and only notifies
+// instanceKlasses that are reachable.
+
+void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
+  "is_alive is non-NULL if and only if we are called during GC");
+  if (!has_flushed_dependencies()) {
+    set_has_flushed_dependencies();
+    for (Dependencies::DepStream deps(this); deps.next(); ) {
+      Klass* klass = deps.context_type();
+      if (klass == NULL)  continue;  // ignore things like evol_method
+
+      // During GC the is_alive closure is non-NULL, and is used to
+      // determine liveness of dependees that need to be updated.
+      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
+        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
+      }
+    }
+  }
+}
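+
+// flush_dependencies above combines a one-shot flag with an optional
+// liveness filter: outside GC (is_alive == NULL) every dependee is
+// notified, during GC only the still-alive ones. A hedged standalone
+// sketch with illustrative types:
+#include <cstddef>
+#include <vector>
+
+typedef bool (*IsAliveFn)(int);
+typedef void (*RemoveDependentFn)(int);
+
+struct FlushSketch {
+  bool flushed;
+  std::vector<int> dependee_ids;
+  FlushSketch() : flushed(false) {}
+
+  void flush(IsAliveFn is_alive, RemoveDependentFn remove_dependent) {
+    if (flushed) return;  // guarantee each dependee is notified exactly once
+    flushed = true;
+    for (std::size_t i = 0; i < dependee_ids.size(); i++) {
+      int id = dependee_ids[i];
+      if (is_alive == NULL || is_alive(id)) {
+        remove_dependent(id);  // analogue of remove_dependent_nmethod(this)
+      }
+    }
+  }
+};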
+
+
+// If this oop is not live, the nmethod can be unloaded.
+bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
+  assert(root != NULL, "just checking");
+  oop obj = *root;
+  if (obj == NULL || is_alive->do_object_b(obj)) {
+      return false;
+  }
+
+  // If ScavengeRootsInCode is true, an nmethod might be unloaded
+  // simply because one of its constant oops has gone dead.
+  // No actual classes need to be unloaded in order for this to occur.
+  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
+  make_unloaded(is_alive, obj);
+  return true;
+}
+
+// ------------------------------------------------------------------
+// post_compiled_method_load_event
+// new method for install_code() path
+// Transfer information from compilation to jvmti
+void nmethod::post_compiled_method_load_event() {
+
+  Method* moop = method();
+#ifndef USDT2
+  HS_DTRACE_PROBE8(hotspot, compiled__method__load,
+      moop->klass_name()->bytes(),
+      moop->klass_name()->utf8_length(),
+      moop->name()->bytes(),
+      moop->name()->utf8_length(),
+      moop->signature()->bytes(),
+      moop->signature()->utf8_length(),
+      insts_begin(), insts_size());
+#else /* USDT2 */
+  HOTSPOT_COMPILED_METHOD_LOAD(
+      (char *) moop->klass_name()->bytes(),
+      moop->klass_name()->utf8_length(),
+      (char *) moop->name()->bytes(),
+      moop->name()->utf8_length(),
+      (char *) moop->signature()->bytes(),
+      moop->signature()->utf8_length(),
+      insts_begin(), insts_size());
+#endif /* USDT2 */
+
+  if (JvmtiExport::should_post_compiled_method_load() ||
+      JvmtiExport::should_post_compiled_method_unload()) {
+    get_and_cache_jmethod_id();
+  }
+
+  if (JvmtiExport::should_post_compiled_method_load()) {
+    // Let the Service thread (which is a real Java thread) post the event
+    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+    JvmtiDeferredEventQueue::enqueue(
+      JvmtiDeferredEvent::compiled_method_load_event(this));
+  }
+}
+
+jmethodID nmethod::get_and_cache_jmethod_id() {
+  if (_jmethod_id == NULL) {
+    // Cache the jmethod_id since it can no longer be looked up once the
+    // method itself has been marked for unloading.
+    _jmethod_id = method()->jmethod_id();
+  }
+  return _jmethod_id;
+}
+
+void nmethod::post_compiled_method_unload() {
+  if (unload_reported()) {
+    // During unloading we transition to unloaded and then to zombie
+    // and the unloading is reported during the first transition.
+    return;
+  }
+
+  assert(_method != NULL && !is_unloaded(), "just checking");
+  DTRACE_METHOD_UNLOAD_PROBE(method());
+
+  // If a JVMTI agent has enabled the CompiledMethodUnload event then
+  // post the event. Sometime later this nmethod will be made a zombie
+  // by the sweeper but the Method* will not be valid at that point.
+  // If the _jmethod_id is null then no load event was ever requested
+  // so don't bother posting the unload.  The main reason for this is
+  // that the jmethodID is a weak reference to the Method* so if
+  // it's being unloaded there's no way to look it up since the weak
+  // ref will have been cleared.
+  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
+    assert(!unload_reported(), "already unloaded");
+    JvmtiDeferredEvent event =
+      JvmtiDeferredEvent::compiled_method_unload_event(this,
+          _jmethod_id, insts_begin());
+    if (SafepointSynchronize::is_at_safepoint()) {
+      // Don't want to take the queueing lock. Add it as pending and
+      // it will get enqueued later.
+      JvmtiDeferredEventQueue::add_pending_event(event);
+    } else {
+      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+      JvmtiDeferredEventQueue::enqueue(event);
+    }
+  }
+
+  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
+// any time. As the nmethod is being unloaded now, we mark it as
+  // having the unload event reported - this will ensure that we don't
+  // attempt to report the event in the unlikely scenario where the
+  // event is enabled at the time the nmethod is made a zombie.
+  set_unload_reported();
+}
+
+// This is called at the end of the strong tracing/marking phase of a
+// GC to unload an nmethod if it contains otherwise unreachable
+// oops.
+
+void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
+  // Make sure the oops are ready to receive visitors.
+  assert(!is_zombie() && !is_unloaded(),
+         "should not call follow on zombie or unloaded nmethod");
+
+  // If the method is not entrant then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+  // The RedefineClasses() API can cause the class unloading invariant
+  // to no longer be true. See jvmtiExport.hpp for details.
+  // Also, leave a debugging breadcrumb in local flag.
+  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
+  if (a_class_was_redefined) {
+    // This set of the unloading_occurred flag is done before the
+    // call to post_compiled_method_unload() so that the unloading
+    // of this nmethod is reported.
+    unloading_occurred = true;
+  }
+
+#ifdef GRAAL
+  // Follow Graal method
+  if (_graal_installed_code != NULL) {
+    if (HotSpotNmethod::isDefault(_graal_installed_code)) {
+      if (!is_alive->do_object_b(_graal_installed_code)) {
+        _graal_installed_code = NULL;
+      }
+    } else {
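+      // Otherwise this nmethod's lifetime follows the installed code oop:
+      // if that object has died, unload the whole nmethod.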
+      if (can_unload(is_alive, (oop*)&_graal_installed_code, unloading_occurred)) {
+        return;
+      }
+    }
+  }
+#endif
+
+  // Exception cache
+  ExceptionCache* ec = exception_cache();
+  while (ec != NULL) {
+    Klass* ex_klass = ec->exception_type();
+    ExceptionCache* next_ec = ec->next();
+    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
+      remove_from_exception_cache(ec);
+    }
+    ec = next_ec;
+  }
+
+  // If class unloading occurred we first iterate over all inline caches and
+  // clear ICs where the cached oop is referring to an unloaded klass or method.
+  // The remaining live cached oops will be traversed in the relocInfo::oop_type
+  // iteration below.
+  if (unloading_occurred) {
+    RelocIterator iter(this, low_boundary);
+    while (iter.next()) {
+      if (iter.type() == relocInfo::virtual_call_type) {
+        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        if (ic->is_icholder_call()) {
+          // The only exception is compiledICHolder oops which may
+          // yet be marked below. (We check this further below).
+          CompiledICHolder* cichk_oop = ic->cached_icholder();
+          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
+              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
+            continue;
+          }
+        } else {
+          Metadata* ic_oop = ic->cached_metadata();
+          if (ic_oop != NULL) {
+            if (ic_oop->is_klass()) {
+              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
+                continue;
+              }
+            } else if (ic_oop->is_method()) {
+              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
+                continue;
+              }
+            } else {
+              ShouldNotReachHere();
+            }
+          }
+        }
+        ic->set_to_clean();
+      }
+    }
+  }
+
+  // Compiled code
+  {
+    RelocIterator iter(this, low_boundary);
+    while (iter.next()) {
+      if (iter.type() == relocInfo::oop_type) {
+        oop_Relocation* r = iter.oop_reloc();
+        // In this loop, we must only traverse those oops directly embedded in
+        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
+        assert(1 == (r->oop_is_immediate()) +
+                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+               "oop must be found in exactly one place");
+        if (r->oop_is_immediate() && r->oop_value() != NULL) {
+          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+            return;
+          }
+        }
+      }
+    }
+  }
+
+  // Scopes
+  for (oop* p = oops_begin(); p < oops_end(); p++) {
+    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
+    if (can_unload(is_alive, p, unloading_occurred)) {
+      return;
+    }
+  }
+
+  // Ensure that all metadata is still alive
+  verify_metadata_loaders(low_boundary, is_alive);
+}
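+
+// Note that do_unloading returns as soon as can_unload() has unloaded this
+// nmethod; once make_unloaded() has run there is nothing left to scan.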
+
+#ifdef ASSERT
+
+class CheckClass : AllStatic {
+  static BoolObjectClosure* _is_alive;
+
+  // Check class_loader is alive for this bit of metadata.
+  static void check_class(Metadata* md) {
+    Klass* klass = NULL;
+    if (md->is_klass()) {
+      klass = ((Klass*)md);
+    } else if (md->is_method()) {
+      klass = ((Method*)md)->method_holder();
+    } else if (md->is_methodData()) {
+      klass = ((MethodData*)md)->method()->method_holder();
+    } else {
+      md->print();
+      ShouldNotReachHere();
+    }
+    assert(klass->is_loader_alive(_is_alive), "must be alive");
+  }
+ public:
+  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
+    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
+    _is_alive = is_alive;
+    nm->metadata_do(check_class);
+  }
+};
+
+// This is called during a safepoint so can use static data
+BoolObjectClosure* CheckClass::_is_alive = NULL;
+#endif // ASSERT
+
+
+// Processing of oop references should have been sufficient to keep
+// all strong references alive.  Any weak references should have been
+// cleared as well.  Visit all the metadata and ensure that it's
+// really alive.
+void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
+#ifdef ASSERT
+  RelocIterator iter(this, low_boundary);
+  while (iter.next()) {
+    // static_stub_Relocations may have dangling references to
+    // Method*s so trim them out here.  Otherwise it looks like
+    // compiled code is maintaining a link to dead metadata.
+    address static_call_addr = NULL;
+    if (iter.type() == relocInfo::opt_virtual_call_type) {
+      CompiledIC* cic = CompiledIC_at(iter.reloc());
+      if (!cic->is_call_to_interpreted()) {
+        static_call_addr = iter.addr();
+      }
+    } else if (iter.type() == relocInfo::static_call_type) {
+      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
+      if (!csc->is_call_to_interpreted()) {
+        static_call_addr = iter.addr();
+      }
+    }
+    if (static_call_addr != NULL) {
+      RelocIterator sciter(this, low_boundary);
+      while (sciter.next()) {
+        if (sciter.type() == relocInfo::static_stub_type &&
+            sciter.static_stub_reloc()->static_call() == static_call_addr) {
+          sciter.static_stub_reloc()->clear_inline_cache();
+        }
+      }
+    }
+  }
+  // Check that the metadata embedded in the nmethod is alive
+  CheckClass::do_check_class(is_alive, this);
+#endif
+}
+
+
+// Iterate over metadata calling this function.   Used by RedefineClasses
+void nmethod::metadata_do(void f(Metadata*)) {
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+  {
+    // Visit all immediate references that are embedded in the instruction stream.
+    RelocIterator iter(this, low_boundary);
+    while (iter.next()) {
+      if (iter.type() == relocInfo::metadata_type ) {
+        metadata_Relocation* r = iter.metadata_reloc();
+        // In this loop, we must only follow those metadatas directly embedded in
+        // the code.  Other metadatas (oop_index>0) are seen as part of
+        // the metadata section below.
+        assert(1 == (r->metadata_is_immediate()) +
+               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
+               "metadata must be found in exactly one place");
+        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
+          Metadata* md = r->metadata_value();
+          f(md);
+        }
+      }
+    }
+  }
+
+  // Visit the metadata section
+  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
+    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
+    Metadata* md = *p;
+    f(md);
+  }
+  // Call the function on the Method* itself; it is not embedded in the other places.
+  if (_method != NULL) f(_method);
+}
+
+
+// This method is called twice during GC -- once while
+// tracing the "active" nmethods on thread stacks during
+// the (strong) marking phase, and then again when walking
+// the code cache contents during the weak roots processing
+// phase. The two uses are distinguished by means of the
+// 'do_strong_roots_only' flag, which is true in the first
+// case. We want to walk the weak roots in the nmethod
+// only in the second case. The weak roots in the nmethod
+// are the oops in the ExceptionCache and the InlineCache
+// oops.
+void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
+  // Make sure the oops are ready to receive visitors.
+  assert(!is_zombie() && !is_unloaded(),
+         "should not call follow on zombie or unloaded nmethod");
+
+  // If the method is not entrant or zombie then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+#ifdef GRAAL
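+  // Visit the Graal installed-code mirror and the triggered-deoptimizations
+  // object as ordinary strong roots.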
+  if (_graal_installed_code != NULL) {
+    f->do_oop((oop*) &_graal_installed_code);
+  }
+  if (_triggered_deoptimizations != NULL) {
+    f->do_oop((oop*) &_triggered_deoptimizations);
+  }
+#endif
+
+  RelocIterator iter(this, low_boundary);
+
+  while (iter.next()) {
+    if (iter.type() == relocInfo::oop_type ) {
+      oop_Relocation* r = iter.oop_reloc();
+      // In this loop, we must only follow those oops directly embedded in
+      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
+      assert(1 == (r->oop_is_immediate()) +
+                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+             "oop must be found in exactly one place");
+      if (r->oop_is_immediate() && r->oop_value() != NULL) {
+        f->do_oop(r->oop_addr());
+      }
+    }
+  }
+
+  // Scopes
+  // This includes oop constants not inlined in the code stream.
+  for (oop* p = oops_begin(); p < oops_end(); p++) {
+    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
+    f->do_oop(p);
+  }
+}
+
+#define NMETHOD_SENTINEL ((nmethod*)badAddress)
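+
+// NMETHOD_SENTINEL does double duty: it is CASed into _oops_do_mark_link to
+// claim an nmethod before it is linked in, and, because the prologue seeds
+// _oops_do_mark_nmethods with it, it also terminates the marked list.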
+
+nmethod* volatile nmethod::_oops_do_mark_nmethods;
+
+// An nmethod is "marked" if its _mark_link is set non-null.
+// Even if it is the end of the linked list, it will have a non-null link value,
+// as long as it is on the list.
+// This code must be MP safe, because it is used from parallel GC passes.
+bool nmethod::test_set_oops_do_mark() {
+  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
+  nmethod* observed_mark_link = _oops_do_mark_link;
+  if (observed_mark_link == NULL) {
+    // Claim this nmethod for this thread to mark.
+    observed_mark_link = (nmethod*)
+      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
+    if (observed_mark_link == NULL) {
+
+      // Atomically append this nmethod (now claimed) to the head of the list:
+      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
+      for (;;) {
+        nmethod* required_mark_nmethods = observed_mark_nmethods;
+        _oops_do_mark_link = required_mark_nmethods;
+        observed_mark_nmethods = (nmethod*)
+          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
+        if (observed_mark_nmethods == required_mark_nmethods)
+          break;
+      }
+      // Mark was clear when we first saw this guy.
+      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
+      return false;
+    }
+  }
+  // On fall through, another racing thread marked this nmethod before we did.
+  return true;
+}
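+
+// A sketch of the intended per-GC protocol, inferred from the assertions in
+// the prologue/epilogue below (the actual call sites live in the GC code):
+//
+//   nmethod::oops_do_marking_prologue();
+//   // in each GC worker, for every nmethod reached from the roots:
+//   if (!nm->test_set_oops_do_mark()) {
+//     nm->oops_do(&closure);    // only the first claimant visits the oops
+//   }
+//   nmethod::oops_do_marking_epilogue();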
+
+void nmethod::oops_do_marking_prologue() {
+  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
+  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
+  // We use cmpxchg_ptr instead of regular assignment here because the user
+  // may fork a bunch of threads, and we need them all to see the same state.
+  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
+  guarantee(observed == NULL, "no races in this sequential code");
+}
+
+void nmethod::oops_do_marking_epilogue() {
+  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
+  nmethod* cur = _oops_do_mark_nmethods;
+  while (cur != NMETHOD_SENTINEL) {
+    assert(cur != NULL, "not NULL-terminated");
+    nmethod* next = cur->_oops_do_mark_link;
+    cur->_oops_do_mark_link = NULL;
+    cur->fix_oop_relocations();
+    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
+    cur = next;
+  }
+  void* required = _oops_do_mark_nmethods;
+  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
+  guarantee(observed == required, "no races in this sequential code");
+  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
+}
+
+class DetectScavengeRoot: public OopClosure {
+  bool     _detected_scavenge_root;
+public:
+  DetectScavengeRoot() : _detected_scavenge_root(false)
+  { NOT_PRODUCT(_print_nm = NULL); }
+  bool detected_scavenge_root() { return _detected_scavenge_root; }
+  virtual void do_oop(oop* p) {
+    if ((*p) != NULL && (*p)->is_scavengable()) {
+      NOT_PRODUCT(maybe_print(p));
+      _detected_scavenge_root = true;
+    }
+  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+#ifndef PRODUCT
+  nmethod* _print_nm;
+  void maybe_print(oop* p) {
+    if (_print_nm == NULL)  return;
+    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
+    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
+                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
+                  (intptr_t)(*p), (intptr_t)p);
+    (*p)->print();
+  }
+#endif //PRODUCT
+};
+
+bool nmethod::detect_scavenge_root_oops() {
+  DetectScavengeRoot detect_scavenge_root;
+  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
+  oops_do(&detect_scavenge_root);
+  return detect_scavenge_root.detected_scavenge_root();
+}
+
+// Method that knows how to preserve outgoing arguments at call. This method must be
+// called with a frame corresponding to a Java invoke.
+void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
+#ifndef SHARK
+  if (!method()->is_native()) {
+    SimpleScopeDesc ssd(this, fr.pc());
+    Bytecode_invoke call(ssd.method(), ssd.bci());
+    // compiled invokedynamic call sites have an implicit receiver at
+    // resolution time, so make sure it gets GC'ed.
+    bool has_receiver = !call.is_invokestatic();
+    Symbol* signature = call.signature();
+    fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
+  }
+#endif // !SHARK
+}
+
+
+oop nmethod::embeddedOop_at(u_char* p) {
+  RelocIterator iter(this, p, p + 1);
+  while (iter.next())
+    if (iter.type() == relocInfo::oop_type) {
+      return iter.oop_reloc()->oop_value();
+    }
+  return NULL;
+}
+
+
+inline bool includes(void* p, void* from, void* to) {
+  return from <= p && p < to;
+}
+
+
+void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
+  assert(count >= 2, "must be sentinel values, at least");
+
+#ifdef ASSERT
+  // must be sorted and unique; we do a binary search in find_pc_desc()
+  int prev_offset = pcs[0].pc_offset();
+  assert(prev_offset == PcDesc::lower_offset_limit,
+         "must start with a sentinel");
+  for (int i = 1; i < count; i++) {
+    int this_offset = pcs[i].pc_offset();
+    assert(this_offset > prev_offset, "offsets must be sorted");
+    prev_offset = this_offset;
+  }
+  assert(prev_offset == PcDesc::upper_offset_limit,
+         "must end with a sentinel");
+#endif //ASSERT
+
+  // Search for MethodHandle invokes and tag the nmethod.
+  for (int i = 0; i < count; i++) {
+    if (pcs[i].is_method_handle_invoke()) {
+      set_has_method_handle_invokes(true);
+      break;
+    }
+  }
+  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
+
+  int size = count * sizeof(PcDesc);
+  assert(scopes_pcs_size() >= size, "oob");
+  memcpy(scopes_pcs_begin(), pcs, size);
+
+  // Adjust the final sentinel downward.
+  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
+  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
+  last_pc->set_pc_offset(content_size() + 1);
+  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
+    // Fill any rounding gaps with copies of the last record.
+    last_pc[1] = last_pc[0];
+  }
+  // The following assert could fail if sizeof(PcDesc) is not
+  // an integral multiple of oopSize (the rounding term).
+  // If it fails, change the logic to always allocate a multiple
+  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
+  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
+}
+
+void nmethod::copy_scopes_data(u_char* buffer, int size) {
+  assert(scopes_data_size() >= size, "oob");
+  memcpy(scopes_data_begin(), buffer, size);
+}
+
+
+#ifdef ASSERT
+static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
+  PcDesc* lower = nm->scopes_pcs_begin();
+  PcDesc* upper = nm->scopes_pcs_end();
+  lower += 1; // exclude initial sentinel
+  PcDesc* res = NULL;
+  for (PcDesc* p = lower; p < upper; p++) {
+    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
+    if (match_desc(p, pc_offset, approximate)) {
+      if (res == NULL)
+        res = p;
+      else
+        res = (PcDesc*) badAddress;
+    }
+  }
+  return res;
+}
+#endif
+
+
+// Finds a PcDesc with real-pc equal to "pc"
+PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
+  address base_address = code_begin();
+  if ((pc < base_address) ||
+      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
+    return NULL;  // PC is wildly out of range
+  }
+  int pc_offset = (int) (pc - base_address);
+
+  // Check whether the PcDesc cache contains the desired PcDesc
+  // (this has an almost 100% hit rate).
+  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
+  if (res != NULL) {
+    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
+    return res;
+  }
+
+  // Fallback algorithm: quasi-linear search for the PcDesc
+  // Find the last pc_offset less than the given offset.
+  // The successor must be the required match, if there is a match at all.
+  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
+  PcDesc* lower = scopes_pcs_begin();
+  PcDesc* upper = scopes_pcs_end();
+  upper -= 1; // exclude final sentinel
+  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
+
+#define assert_LU_OK \
+  /* invariant on lower..upper during the following search: */ \
+  assert(lower->pc_offset() <  pc_offset, "sanity"); \
+  assert(upper->pc_offset() >= pc_offset, "sanity")
+  assert_LU_OK;
+
+  // Use the last successful return as a split point.
+  PcDesc* mid = _pc_desc_cache.last_pc_desc();
+  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
+  if (mid->pc_offset() < pc_offset) {
+    lower = mid;
+  } else {
+    upper = mid;
+  }
+
+  // Take giant steps at first (4096, then 256, then 16, then 1)
+  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
+  const int RADIX = (1 << LOG2_RADIX);
+  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
+    while ((mid = lower + step) < upper) {
+      assert_LU_OK;
+      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
+      if (mid->pc_offset() < pc_offset) {
+        lower = mid;
+      } else {
+        upper = mid;
+        break;
+      }
+    }
+    assert_LU_OK;
+  }
+
+  // Sneak up on the value with a linear search of length ~16.
+  while (true) {
+    assert_LU_OK;
+    mid = lower + 1;
+    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
+    if (mid->pc_offset() < pc_offset) {
+      lower = mid;
+    } else {
+      upper = mid;
+      break;
+    }
+  }
+#undef assert_LU_OK
+
+  if (match_desc(upper, pc_offset, approximate)) {
+    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
+    _pc_desc_cache.add_pc_desc(upper);
+    return upper;
+  } else {
+    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
+    return NULL;
+  }
+}
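+
+// With LOG2_RADIX == 4, the strides above are 4096, 256 and 16 PcDescs,
+// followed by the unit-stride scan, so a cache miss costs on the order of
+// RADIX probes per level rather than a pointer-arithmetic-heavy binary
+// search over the whole table.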
+
+
+bool nmethod::check_all_dependencies() {
+  bool found_check = false;
+  // wholesale check of all dependencies
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    if (deps.check_dependency() != NULL) {
+      found_check = true;
+      NOT_DEBUG(break);
+    }
+  }
+  return found_check;  // tell caller if we found anything
+}
+
+bool nmethod::check_dependency_on(DepChange& changes) {
+  // What has happened:
+  // 1) a new class dependee has been added
+  // 2) dependee and all its super classes have been marked
+  bool found_check = false;  // set true if we are upset
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    // Evaluate only relevant dependencies.
+    if (deps.spot_check_dependency_at(changes) != NULL) {
+      found_check = true;
+      NOT_DEBUG(break);
+    }
+  }
+  return found_check;
+}
+
+bool nmethod::is_evol_dependent_on(Klass* dependee) {
+  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
+  Array<Method*>* dependee_methods = dependee_ik->methods();
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    if (deps.type() == Dependencies::evol_method) {
+      Method* method = deps.method_argument(0);
+      for (int j = 0; j < dependee_methods->length(); j++) {
+        if (dependee_methods->at(j) == method) {
+          // RC_TRACE macro has an embedded ResourceMark
+          RC_TRACE(0x01000000,
+            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
+            _method->method_holder()->external_name(),
+            _method->name()->as_C_string(),
+            _method->signature()->as_C_string(), compile_id(),
+            method->method_holder()->external_name(),
+            method->name()->as_C_string(),
+            method->signature()->as_C_string()));
+          if (TraceDependencies || LogCompilation)
+            deps.log_dependency(dependee);
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
+// Called from mark_for_deoptimization, when dependee is invalidated.
+bool nmethod::is_dependent_on_method(Method* dependee) {
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    if (deps.type() != Dependencies::evol_method)
+      continue;
+    Method* method = deps.method_argument(0);
+    if (method == dependee) return true;
+  }
+  return false;
+}
+
+
+bool nmethod::is_patchable_at(address instr_addr) {
+  assert(insts_contains(instr_addr), "wrong nmethod used");
+  if (is_zombie()) {
+    // a zombie may never be patched
+    return false;
+  }
+  return true;
+}
+
+
+address nmethod::continuation_for_implicit_exception(address pc) {
+  // Exception happened outside inline-cache check code => we are inside
+  // an active nmethod => use cpc to determine a return address
+  int exception_offset = pc - code_begin();
+  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
+#ifdef ASSERT
+  if (cont_offset == 0) {
+    Thread* thread = ThreadLocalStorage::get_thread_slow();
+    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
+    HandleMark hm(thread);
+    ResourceMark rm(thread);
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    assert(cb != NULL && cb == this, "");
+    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
+    print();
+    method()->print_codes();
+    print_code();
+    print_pcs();
+  }
+#endif
+  if (cont_offset == 0) {
+    // Let the normal error handling report the exception
+    return NULL;
+  }
+  return code_begin() + cont_offset;
+}
+
+
+
+void nmethod_init() {
+  // make sure you didn't forget to adjust the filler fields
+  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
+}
+
+
+//-------------------------------------------------------------------------------------------
+
+
+// QQQ might we make this work from a frame??
+nmethodLocker::nmethodLocker(address pc) {
+  CodeBlob* cb = CodeCache::find_blob(pc);
+  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for an nmethod found");
+  _nm = (nmethod*)cb;
+  lock_nmethod(_nm);
+}
+
+// Only JvmtiDeferredEvent::compiled_method_unload_event()
+// should pass zombie_ok == true.
+void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
+  if (nm == NULL)  return;
+  Atomic::inc(&nm->_lock_count);
+  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
+}
+
+void nmethodLocker::unlock_nmethod(nmethod* nm) {
+  if (nm == NULL)  return;
+  Atomic::dec(&nm->_lock_count);
+  guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
+}
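+
+// nmethodLocker is intended as a stack-allocated (RAII) guard; the matching
+// unlock happens in its destructor (declared in nmethod.hpp):
+//
+//   {
+//     nmethodLocker nml(pc);   // pins the nmethod containing pc
+//     ...                      // the nmethod cannot be flushed here
+//   }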
+
+
+// -----------------------------------------------------------------------------
+// nmethod::get_deopt_original_pc
+//
+// Return the original PC for the given PC if:
+// (a) the given PC belongs to a nmethod and
+// (b) it is a deopt PC
+address nmethod::get_deopt_original_pc(const frame* fr) {
+  if (fr->cb() == NULL)  return NULL;
+
+  nmethod* nm = fr->cb()->as_nmethod_or_null();
+  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
+    return nm->get_original_pc(fr);
+
+  return NULL;
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandle
+
+bool nmethod::is_method_handle_return(address return_pc) {
+  if (!has_method_handle_invokes())  return false;
+  PcDesc* pd = pc_desc_at(return_pc);
+  if (pd == NULL)
+    return false;
+  return pd->is_method_handle_invoke();
+}
+
+
+// -----------------------------------------------------------------------------
+// Verification
+
+class VerifyOopsClosure: public OopClosure {
+  nmethod* _nm;
+  bool     _ok;
+public:
+  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
+  bool ok() { return _ok; }
+  virtual void do_oop(oop* p) {
+    if ((*p) == NULL || (*p)->is_oop())  return;
+    if (_ok) {
+      _nm->print_nmethod(true);
+      _ok = false;
+    }
+    tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+
+void nmethod::verify() {
+
+  // Hmm: OSR methods can be deopted but not marked as zombie or
+  // not_entrant, which seems odd.
+
+  if (is_zombie() || is_not_entrant())
+    return;
+
+  // Make sure all the entry points are correctly aligned for patching.
+  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
+
+  // assert(method()->is_oop(), "must be valid");
+
+  ResourceMark rm;
+
+  if (!CodeCache::contains(this)) {
+    fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
+  }
+
+  if (is_native_method())
+    return;
+
+  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
+  if (nm != this) {
+    fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
+                  this));
+  }
+
+  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
+    if (!p->verify(this)) {
+      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
+    }
+  }
+
+  VerifyOopsClosure voc(this);
+  oops_do(&voc);
+  assert(voc.ok(), "embedded oops must be OK");
+  verify_scavenge_root_oops();
+
+  verify_scopes();
+}
+
+
+void nmethod::verify_interrupt_point(address call_site) {
+  // This code does not work in release mode since
+  // owns_lock is only available in debug mode.
+  CompiledIC* ic = NULL;
+  Thread *cur = Thread::current();
+  if (CompiledIC_lock->owner() == cur ||
+      ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
+       SafepointSynchronize::is_at_safepoint())) {
+    ic = CompiledIC_at(this, call_site);
+    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+  } else {
+    MutexLocker ml_verify (CompiledIC_lock);
+    ic = CompiledIC_at(this, call_site);
+  }
+
+  PcDesc* pd = pc_desc_at(ic->end_of_call());
+  assert(pd != NULL, "PcDesc must exist");
+  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
+                                     pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
+                                     pd->return_oop());
+       !sd->is_top(); sd = sd->sender()) {
+    sd->verify();
+  }
+}
+
+void nmethod::verify_scopes() {
+  if (!method()) return;        // Runtime stubs have no scope
+  if (method()->is_native()) return; // Ignore stub methods.
+  // Iterate through all interrupt points
+  // and verify that the debug information is valid.
+  RelocIterator iter((nmethod*)this);
+  while (iter.next()) {
+    address stub = NULL;
+    switch (iter.type()) {
+      case relocInfo::virtual_call_type:
+        verify_interrupt_point(iter.addr());
+        break;
+      case relocInfo::opt_virtual_call_type:
+        stub = iter.opt_virtual_call_reloc()->static_stub();
+        verify_interrupt_point(iter.addr());
+        break;
+      case relocInfo::static_call_type:
+        stub = iter.static_call_reloc()->static_stub();
+        //verify_interrupt_point(iter.addr());
+        break;
+      case relocInfo::runtime_call_type: {
+        address destination = iter.reloc()->value();
+        // Right now there is no way to find out which entries support
+        // an interrupt point.  It would be nice if we had this
+        // information in a table.
+        break;
+      }
+    }
+    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Non-product code
+#ifndef PRODUCT
+
+class DebugScavengeRoot: public OopClosure {
+  nmethod* _nm;
+  bool     _ok;
+public:
+  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
+  bool ok() { return _ok; }
+  virtual void do_oop(oop* p) {
+    if ((*p) == NULL || !(*p)->is_scavengable())  return;
+    if (_ok) {
+      _nm->print_nmethod(true);
+      _ok = false;
+    }
+    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+    (*p)->print();
+  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+
+void nmethod::verify_scavenge_root_oops() {
+  if (!on_scavenge_root_list()) {
+    // Actually look inside, to verify the claim that it's clean.
+    DebugScavengeRoot debug_scavenge_root(this);
+    oops_do(&debug_scavenge_root);
+    if (!debug_scavenge_root.ok())
+      fatal("found an unadvertised bad scavengable oop in the code cache");
+  }
+  assert(scavenge_root_not_marked(), "");
+}
+
+#endif // PRODUCT
+
+// Printing operations
+
+void nmethod::print() const {
+  ResourceMark rm;
+  ttyLocker ttyl;   // keep the following output all in one block
+
+  tty->print("Compiled method ");
+
+  if (is_compiled_by_c1()) {
+    tty->print("(c1) ");
+  } else if (is_compiled_by_c2()) {
+    tty->print("(c2) ");
+  } else if (is_compiled_by_shark()) {
+    tty->print("(shark) ");
+  } else if (is_compiled_by_graal()) {
+    tty->print("(Graal) ");
+  } else {
+    tty->print("(nm) ");
+  }
+
+  print_on(tty, NULL);
+
+  if (WizardMode) {
+    tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
+    tty->print(" for method " INTPTR_FORMAT , (address)method());
+    tty->print(" { ");
+    if (is_in_use())      tty->print("in_use ");
+    if (is_not_entrant()) tty->print("not_entrant ");
+    if (is_zombie())      tty->print("zombie ");
+    if (is_unloaded())    tty->print("unloaded ");
+    if (on_scavenge_root_list())  tty->print("scavenge_root ");
+    tty->print_cr("}:");
+  }
+  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              (address)this,
+                                              (address)this + size(),
+                                              size());
+  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              relocation_begin(),
+                                              relocation_end(),
+                                              relocation_size());
+  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              consts_begin(),
+                                              consts_end(),
+                                              consts_size());
+  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              insts_begin(),
+                                              insts_end(),
+                                              insts_size());
+  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              stub_begin(),
+                                              stub_end(),
+                                              stub_size());
+  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              oops_begin(),
+                                              oops_end(),
+                                              oops_size());
+  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              metadata_begin(),
+                                              metadata_end(),
+                                              metadata_size());
+  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              scopes_data_begin(),
+                                              scopes_data_end(),
+                                              scopes_data_size());
+  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              scopes_pcs_begin(),
+                                              scopes_pcs_end(),
+                                              scopes_pcs_size());
+  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              dependencies_begin(),
+                                              dependencies_end(),
+                                              dependencies_size());
+  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              handler_table_begin(),
+                                              handler_table_end(),
+                                              handler_table_size());
+  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+                                              nul_chk_table_begin(),
+                                              nul_chk_table_end(),
+                                              nul_chk_table_size());
+}
+
+void nmethod::print_code() {
+  HandleMark hm;
+  ResourceMark m;
+  Disassembler::decode(this);
+}
+
+
+#ifndef PRODUCT
+
+void nmethod::print_scopes() {
+  // Find the first pc desc for all scopes in the code and print it.
+  ResourceMark rm;
+  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
+    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
+      continue;
+
+    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
+    sd->print_on(tty, p);
+  }
+}
+
+void nmethod::print_dependencies() {
+  ResourceMark rm;
+  ttyLocker ttyl;   // keep the following output all in one block
+  tty->print_cr("Dependencies:");
+  for (Dependencies::DepStream deps(this); deps.next(); ) {
+    deps.print_dependency();
+    Klass* ctxk = deps.context_type();
+    if (ctxk != NULL) {
+      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
+        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
+      }
+    }
+    deps.log_dependency();  // put it into the xml log also
+  }
+}
+
+
+void nmethod::print_relocations() {
+  ResourceMark m;       // in case methods get printed via the debugger
+  tty->print_cr("relocations:");
+  RelocIterator iter(this);
+  iter.print();
+  if (UseRelocIndex) {
+    jint* index_end   = (jint*)relocation_end() - 1;
+    jint  index_size  = *index_end;
+    jint* index_start = (jint*)( (address)index_end - index_size );
+    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
+    if (index_size > 0) {
+      jint* ip;
+      for (ip = index_start; ip+2 <= index_end; ip += 2)
+        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
+                      ip[0],
+                      ip[1],
+                      header_end()+ip[0],
+                      relocation_begin()-1+ip[1]);
+      for (; ip < index_end; ip++)
+        tty->print_cr("  (%d ?)", ip[0]);
+      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
+      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
+    }
+  }
+}
+
+
+void nmethod::print_pcs() {
+  ResourceMark m;       // in case methods get printed via debugger
+  tty->print_cr("pc-bytecode offsets:");
+  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
+    p->print(this);
+  }
+}
+
+#endif // PRODUCT
+
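+// Return a printable name for the first relocation found in the given range,
+// "other" if only relocation types not listed below occur, or NULL if there
+// are no relocations at all.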
+const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
+  RelocIterator iter(this, begin, end);
+  bool have_one = false;
+  while (iter.next()) {
+    have_one = true;
+    switch (iter.type()) {
+        case relocInfo::none:                  return "no_reloc";
+        case relocInfo::oop_type: {
+          stringStream st;
+          oop_Relocation* r = iter.oop_reloc();
+          oop obj = r->oop_value();
+          st.print("oop(");
+          if (obj == NULL) st.print("NULL");
+          else obj->print_value_on(&st);
+          st.print(")");
+          return st.as_string();
+        }
+        case relocInfo::metadata_type: {
+          stringStream st;
+          metadata_Relocation* r = iter.metadata_reloc();
+          Metadata* obj = r->metadata_value();
+          st.print("metadata(");
+          if (obj == NULL) st.print("NULL");
+          else obj->print_value_on(&st);
+          st.print(")");
+          return st.as_string();
+        }
+        case relocInfo::virtual_call_type:     return "virtual_call";
+        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
+        case relocInfo::static_call_type:      return "static_call";
+        case relocInfo::static_stub_type:      return "static_stub";
+        case relocInfo::runtime_call_type:     return "runtime_call";
+        case relocInfo::external_word_type:    return "external_word";
+        case relocInfo::internal_word_type:    return "internal_word";
+        case relocInfo::section_word_type:     return "section_word";
+        case relocInfo::poll_type:             return "poll";
+        case relocInfo::poll_return_type:      return "poll_return";
+        case relocInfo::type_mask:             return "type_bit_mask";
+    }
+  }
+  return have_one ? "other" : NULL;
+}
+
+// Return the last scope in (begin..end]
+ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
+  PcDesc* p = pc_desc_near(begin+1);
+  if (p != NULL && p->real_pc(this) <= end) {
+    return new ScopeDesc(this, p->scope_decode_offset(),
+                         p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(),
+                         p->return_oop());
+  }
+  return NULL;
+}
+
+void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
+  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
+  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
+  if (GRAAL_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
+  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
+  if (GRAAL_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
+
+  if (has_method_handle_invokes())
+    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
+
+  if (block_begin == consts_begin())            stream->print_cr("[Constants]");
+
+  if (block_begin == entry_point()) {
+    methodHandle m = method();
+    if (m.not_null()) {
+      stream->print("  # ");
+      m->print_value_on(stream);
+      stream->cr();
+    }
+    if (m.not_null() && !is_osr_method()) {
+      ResourceMark rm;
+      int sizeargs = m->size_of_parameters();
+      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
+      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
+      {
+        int sig_index = 0;
+        if (!m->is_static())
+          sig_bt[sig_index++] = T_OBJECT; // 'this'
+        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
+          BasicType t = ss.type();
+          sig_bt[sig_index++] = t;
+          if (type2size[t] == 2) {
+            sig_bt[sig_index++] = T_VOID;
+          } else {
+            assert(type2size[t] == 1, "size is 1 or 2");
+          }
+        }
+        assert(sig_index == sizeargs, "");
+      }
+      const char* spname = "sp"; // make arch-specific?
+      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
+      int stack_slot_offset = this->frame_size() * wordSize;
+      int tab1 = 14, tab2 = 24;
+      int sig_index = 0;
+      int arg_index = (m->is_static() ? 0 : -1);
+      bool did_old_sp = false;
+      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
+        bool at_this = (arg_index == -1);
+        bool at_old_sp = false;
+        BasicType t = (at_this ? T_OBJECT : ss.type());
+        assert(t == sig_bt[sig_index], "sigs in sync");
+        if (at_this)
+          stream->print("  # this: ");
+        else
+          stream->print("  # parm%d: ", arg_index);
+        stream->move_to(tab1);
+        VMReg fst = regs[sig_index].first();
+        VMReg snd = regs[sig_index].second();
+        if (fst->is_reg()) {
+          stream->print("%s", fst->name());
+          if (snd->is_valid())  {
+            stream->print(":%s", snd->name());
+          }
+        } else if (fst->is_stack()) {
+          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
+          if (offset == stack_slot_offset)  at_old_sp = true;
+          stream->print("[%s+0x%x]", spname, offset);
+        } else {
+          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
+        }
+        stream->print(" ");
+        stream->move_to(tab2);
+        stream->print("= ");
+        if (at_this) {
+          m->method_holder()->print_value_on(stream);
+        } else {
+          bool did_name = false;
+          if (!at_this && ss.is_object()) {
+            Symbol* name = ss.as_symbol_or_null();
+            if (name != NULL) {
+              name->print_value_on(stream);
+              did_name = true;
+            }
+          }
+          if (!did_name)
+            stream->print("%s", type2name(t));
+        }
+        if (at_old_sp) {
+          stream->print("  (%s of caller)", spname);
+          did_old_sp = true;
+        }
+        stream->cr();
+        sig_index += type2size[t];
+        arg_index += 1;
+        if (!at_this)  ss.next();
+      }
+      if (!did_old_sp) {
+        stream->print("  # ");
+        stream->move_to(tab1);
+        stream->print("[%s+0x%x]", spname, stack_slot_offset);
+        stream->print("  (%s of caller)", spname);
+        stream->cr();
+      }
+    }
+  }
+}
+
+void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
+  // First, find an oopmap in (begin, end].
+  // We use the odd half-closed interval so that oop maps and scope descs
+  // which are tied to the byte after a call are printed with the call itself.
+  address base = code_begin();
+  OopMapSet* oms = oop_maps();
+  if (oms != NULL) {
+    for (int i = 0, imax = oms->size(); i < imax; i++) {
+      OopMap* om = oms->at(i);
+      address pc = base + om->offset();
+      if (pc > begin) {
+        if (pc <= end) {
+          st->move_to(column);
+          st->print("; ");
+          om->print_on(st);
+        }
+        break;
+      }
+    }
+  }
+
+  // Print any debug info present at this pc.
+  ScopeDesc* sd  = scope_desc_in(begin, end);
+  if (sd != NULL) {
+    st->move_to(column);
+    if (sd->bci() == SynchronizationEntryBCI) {
+      st->print(";*synchronization entry");
+    } else {
+      if (sd->method() == NULL) {
+        st->print("method is NULL");
+      } else if (sd->method()->is_native()) {
+        st->print("method is native");
+      } else {
+        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
+        st->print(";*%s", Bytecodes::name(bc));
+        switch (bc) {
+        case Bytecodes::_invokevirtual:
+        case Bytecodes::_invokespecial:
+        case Bytecodes::_invokestatic:
+        case Bytecodes::_invokeinterface:
+          {
+            Bytecode_invoke invoke(sd->method(), sd->bci());
+            st->print(" ");
+            if (invoke.name() != NULL)
+              invoke.name()->print_symbol_on(st);
+            else
+              st->print("<UNKNOWN>");
+            break;
+          }
+        case Bytecodes::_getfield:
+        case Bytecodes::_putfield:
+        case Bytecodes::_getstatic:
+        case Bytecodes::_putstatic:
+          {
+            Bytecode_field field(sd->method(), sd->bci());
+            st->print(" ");
+            if (field.name() != NULL)
+              field.name()->print_symbol_on(st);
+            else
+              st->print("<UNKNOWN>");
+          }
+        }
+      }
+    }
+
+    // Print all scopes
+    for (;sd != NULL; sd = sd->sender()) {
+      st->move_to(column);
+      st->print("; -");
+      if (sd->method() == NULL) {
+        st->print("method is NULL");
+      } else {
+        sd->method()->print_short_name(st);
+        int lineno = sd->method()->line_number_from_bci(sd->bci());
+        if (lineno != -1) {
+          st->print("@%d (line %d)", sd->bci(), lineno);
+        } else {
+          st->print("@%d", sd->bci());
+        }
+      }
+      st->cr();
+    }
+  }
+
+  // Print relocation information
+  const char* str = reloc_string_for(begin, end);
+  if (str != NULL) {
+    if (sd != NULL) st->cr();
+    st->move_to(column);
+    st->print(";   {%s}", str);
+  }
+  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
+  if (cont_offset != 0) {
+    st->move_to(column);
+    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
+  }
+
+}
+
+#ifndef PRODUCT
+
+void nmethod::print_value_on(outputStream* st) const {
+  st->print("nmethod");
+  print_on(st, NULL);
+}
+
+void nmethod::print_calls(outputStream* st) {
+  RelocIterator iter(this);
+  while (iter.next()) {
+    switch (iter.type()) {
+    case relocInfo::virtual_call_type:
+    case relocInfo::opt_virtual_call_type: {
+      VerifyMutexLocker mc(CompiledIC_lock);
+      CompiledIC_at(iter.reloc())->print();
+      break;
+    }
+    case relocInfo::static_call_type:
+      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
+      compiledStaticCall_at(iter.reloc())->print();
+      break;
+    }
+  }
+}
+
+void nmethod::print_handler_table() {
+  ExceptionHandlerTable(this).print();
+}
+
+void nmethod::print_nul_chk_table() {
+  ImplicitExceptionTable(this).print(code_begin());
+}
+
+#endif // PRODUCT
+
+void nmethod::print_statistics() {
+  ttyLocker ttyl;
+  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
+  nmethod_stats.print_native_nmethod_stats();
+  nmethod_stats.print_nmethod_stats();
+  DebugInformationRecorder::print_statistics();
+  nmethod_stats.print_pc_stats();
+  Dependencies::print_statistics();
+  if (xtty != NULL)  xtty->tail("statistics");
+}
--- a/src/share/vm/code/nmethod.hpp	Tue May 14 22:02:23 2013 +0200
+++ b/src/share/vm/code/nmethod.hpp	Wed May 15 14:53:34 2013 +0200
@@ -689,7 +689,7 @@
 
   // Prints a comment for one native instruction (reloc info, pc desc)
   void print_code_comment_on(outputStream* st, int column, address begin, address end);
-  static void print_statistics()                  PRODUCT_RETURN;
+  static void print_statistics();
 
   // Compiler task identification.  Note that all OSR methods
   // are numbered in an independent sequence if CICountOSR is true,
--- a/src/share/vm/runtime/java.cpp	Tue May 14 22:02:23 2013 +0200
+++ b/src/share/vm/runtime/java.cpp	Wed May 15 14:53:34 2013 +0200
@@ -249,7 +249,6 @@
     Runtime1::print_statistics();
     Deoptimization::print_statistics();
     SharedRuntime::print_statistics();
-    nmethod::print_statistics();
   }
 #endif /* COMPILER1 */
 
@@ -259,12 +258,11 @@
     Compile::print_statistics();
 #ifndef COMPILER1
     Deoptimization::print_statistics();
-    nmethod::print_statistics();
     SharedRuntime::print_statistics();
 #endif //COMPILER1
     os::print_statistics();
   }
-
+  
   if (PrintLockStatistics || PrintPreciseBiasedLockingStatistics) {
     OptoRuntime::print_named_counters();
   }
@@ -278,6 +276,10 @@
   }
 #endif // ASSERT
 #endif // COMPILER2
+
+  if (PrintNMethodStatistics) {
+    nmethod::print_statistics();
+  }
   if (CountCompiledCalls) {
     print_method_invocation_histogram();
   }
@@ -386,6 +388,9 @@
   if (PrintBiasedLockingStatistics) {
     BiasedLocking::print_counters();
   }
+  if (PrintNMethodStatistics) {
+    nmethod::print_statistics();
+  }
 
   // Native memory tracking data
   if (PrintNMTStatistics) {