# HG changeset patch
# User Christian Haeubl
# Date 1368519511 -7200
# Node ID cf0e311518303862098900aa8afe4652b001b495
# Parent  f44d7e24cebda40265584f75692afab3d6d695b3
# Parent  f7148150ae37b08d0ab489b4e2693799e009c1ed
Merge.

diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Architecture.java
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Architecture.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Architecture.java	Tue May 14 10:18:31 2013 +0200
@@ -198,7 +198,19 @@
         }
     }
 
+    /**
+     * Determines whether a kind can be stored in a register of a given category.
+     *
+     * @param category the category of the register
+     * @param kind the kind that should be stored in the register
+     */
     public abstract boolean canStoreValue(RegisterCategory category, PlatformKind kind);
 
+    /**
+     * Returns the largest kind that can be stored in a register of a given category.
+     *
+     * @param category the category of the register
+     * @return the largest kind that can be stored in a register of category {@code category}
+     */
     public abstract PlatformKind getLargestStorableKind(RegisterCategory category);
 }
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Register.java
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Register.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Register.java	Tue May 14 10:18:31 2013 +0200
@@ -206,20 +206,18 @@
 
     @Override
     public int hashCode() {
-        final int prime = 17;
-        int result = 1;
-        result = prime * result + encoding;
-        result = prime * result + name.hashCode();
-        result = prime * result + number;
-        result = prime * result + registerCategory.hashCode();
-        return result;
+        return 17 + name.hashCode();
     }
 
     @Override
     public boolean equals(Object obj) {
         if (obj instanceof Register) {
             Register other = (Register) obj;
-            return encoding == other.encoding && name.equals(other.name) && number == other.number && registerCategory.equals(registerCategory);
+            if (number == other.number && name.equals(other.name)) {
+                assert encoding == other.encoding;
+                assert registerCategory == other.registerCategory;
+                return true;
+            }
         }
         return false;
     }
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Value.java
--- a/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Value.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Value.java	Tue May 14 10:18:31 2013 +0200
@@ -81,11 +81,7 @@
 
     @Override
     public int hashCode() {
-        final int prime = 41;
-        int result = 1;
-        result = prime * result + kind.hashCode();
-        result = prime * result + platformKind.hashCode();
-        return result;
+        return 41 + platformKind.hashCode();
    }
 
     @Override
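The two hunks above also fix a latent bug: the old Register.equals compared registerCategory against itself, so the category check never failed. The new code hashes on name alone while equality is decided by number and name, which keeps the equals/hashCode contract: equal objects must hash alike, and a hash coarser than equality only costs collisions, never correctness. A minimal, self-contained sketch of why this holds, using a hypothetical Reg class rather than the patched Register:

// Hypothetical Reg class, simplified from the patched Register.
import java.util.HashSet;
import java.util.Set;

final class Reg {
    final int number;
    final String name;

    Reg(int number, String name) {
        this.number = number;
        this.name = name;
    }

    @Override
    public int hashCode() {
        // Uses only name: any two equal Regs agree on name, so equal objects hash alike.
        return 17 + name.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj instanceof Reg) {
            Reg other = (Reg) obj;
            return number == other.number && name.equals(other.name);
        }
        return false;
    }

    public static void main(String[] args) {
        Set<Reg> set = new HashSet<>();
        set.add(new Reg(0, "rax"));
        // Equal identity data is deduplicated because equal objects share a hash bucket.
        System.out.println(set.add(new Reg(0, "rax"))); // prints false
    }
}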
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Tue May 14 10:18:31 2013 +0200
@@ -206,7 +206,7 @@
         return new AMD64AddressValue(target().wordKind, baseRegister, indexRegister, scaleEnum, displacementInt);
     }
 
-    private AMD64AddressValue asAddress(Value address) {
+    protected AMD64AddressValue asAddressValue(Value address) {
         if (address instanceof AMD64AddressValue) {
             return (AMD64AddressValue) address;
         } else {
@@ -216,7 +216,7 @@
 
     @Override
     public Variable emitLoad(Kind kind, Value address, DeoptimizingNode deopting) {
-        AMD64AddressValue loadAddress = asAddress(address);
+        AMD64AddressValue loadAddress = asAddressValue(address);
         Variable result = newVariable(kind);
         append(new LoadOp(kind, result, loadAddress, deopting != null ? state(deopting) : null));
         return result;
@@ -224,7 +224,7 @@
 
     @Override
     public void emitStore(Kind kind, Value address, Value inputVal, DeoptimizingNode deopting) {
-        AMD64AddressValue storeAddress = asAddress(address);
+        AMD64AddressValue storeAddress = asAddressValue(address);
         LIRFrameState state = deopting != null ? state(deopting) : null;
         if (isConstant(inputVal)) {
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Tue May 14 10:18:31 2013 +0200
@@ -169,11 +169,10 @@
 
     @Override
     public Value setResult(ValueNode x, Value operand) {
-        assert (!isVariable(operand) || x.kind() == operand.getKind()) : operand.getKind() + " for node " + x;
         assert (!isRegister(operand) || !attributes(asRegister(operand)).isAllocatable());
         assert operand(x) == null : "operand cannot be set twice";
         assert operand != null && isLegal(operand) : "operand must be legal";
-        assert operand.getKind().getStackKind() == x.kind() : operand.getKind().getStackKind() + " must match " + x.kind();
+        assert operand.getKind().getStackKind() == x.kind() || x.kind() == Kind.Illegal : operand.getKind().getStackKind() + " must match " + x.kind();
         assert !(x instanceof VirtualObjectNode);
         nodeOperands.set(x, operand);
         return operand;
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/phases/MidTier.java
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/phases/MidTier.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/phases/MidTier.java	Tue May 14 10:18:31 2013 +0200
@@ -72,5 +72,9 @@
 
         addPhase(new SafepointInsertionPhase());
         addPhase(new GuardLoweringPhase());
+
+        if (GraalOptions.OptCanonicalizer) {
+            addPhase(new CanonicalizerPhase());
+        }
     }
 }
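The MidTier hunk appends an extra CanonicalizerPhase after guard lowering, gated by the OptCanonicalizer flag, presumably so that folding opportunities exposed by lowering are cleaned up before later tiers run. A minimal sketch of this flag-gated-phase pattern, with hypothetical TierSketch and Phase types and a plain boolean standing in for GraalOptions.OptCanonicalizer:

// Hypothetical sketch: phases are chosen once, at suite construction time.
import java.util.ArrayList;
import java.util.List;

final class TierSketch {
    static final boolean OPT_CANONICALIZER = true; // stands in for GraalOptions.OptCanonicalizer

    interface Phase {
        void run();
    }

    private final List<Phase> phases = new ArrayList<>();

    void addPhase(Phase phase) {
        phases.add(phase);
    }

    TierSketch() {
        addPhase(() -> System.out.println("safepoint insertion"));
        addPhase(() -> System.out.println("guard lowering"));
        if (OPT_CANONICALIZER) {
            // Re-canonicalize after lowering has exposed new folding opportunities.
            addPhase(() -> System.out.println("canonicalizer"));
        }
    }

    void run() {
        phases.forEach(Phase::run);
    }

    public static void main(String[] args) {
        new TierSketch().run();
    }
}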
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/HotSpotInstalledCodeTest.java
--- a/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/HotSpotInstalledCodeTest.java	Tue May 14 10:17:06 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.test;
-
-import org.junit.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.test.*;
-import com.oracle.graal.hotspot.meta.*;
-import com.oracle.graal.nodes.*;
-
-public class HotSpotInstalledCodeTest extends GraalCompilerTest {
-
-    private static final int ITERATION_COUNT = 100000;
-
-    @Test
-    public void testInstallCodeInvalidation() {
-        final ResolvedJavaMethod testJavaMethod = runtime.lookupJavaMethod(getMethod("foo"));
-        final StructuredGraph graph = parse("otherFoo");
-        final HotSpotInstalledCode installedCode = (HotSpotInstalledCode) getCode(testJavaMethod, graph);
-        Assert.assertTrue(installedCode.isValid());
-        Object result;
-        try {
-            result = installedCode.execute("a", "b", "c");
-            assertEquals(43, result);
-        } catch (InvalidInstalledCodeException e) {
-            Assert.fail("Code was invalidated");
-        }
-        Assert.assertTrue(installedCode.isValid());
-        installedCode.invalidate();
-        Assert.assertFalse(installedCode.isValid());
-        try {
-            result = installedCode.execute(null, null, null);
-            Assert.fail("Code was not invalidated");
-        } catch (InvalidInstalledCodeException e) {
-        }
-        Assert.assertFalse(installedCode.isValid());
-    }
-
-    @Test
-    public void testInstalledCodeCalledFromCompiledCode() {
-        final ResolvedJavaMethod testJavaMethod = runtime.lookupJavaMethod(getMethod("foo"));
-        final StructuredGraph graph = parse("otherFoo");
-        final HotSpotInstalledCode installedCode = (HotSpotInstalledCode) getCode(testJavaMethod, graph);
-        Assert.assertTrue(installedCode.isValid());
-        try {
-            for (int i = 0; i < ITERATION_COUNT; ++i) {
-                installedCode.execute("a", "b", "c");
-            }
-        } catch (InvalidInstalledCodeException e) {
-            Assert.fail("Code was invalidated");
-        }
-    }
-
-    @SuppressWarnings("unused")
-    public static Object foo(Object a1, Object a2, Object a3) {
-        return 42;
-    }
-
-    @SuppressWarnings("unused")
-    public static Object otherFoo(Object a1, Object a2, Object a3) {
-        return 43;
-    }
-}
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/HotSpotNmethodTest.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/HotSpotNmethodTest.java	Tue May 14 10:18:31 2013 +0200
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.test;
+
+import org.junit.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.test.*;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.nodes.*;
+
+public class HotSpotNmethodTest extends GraalCompilerTest {
+
+    private static final int ITERATION_COUNT = 100000;
+
+    @Test
+    public void testInstallCodeInvalidation() {
+        final ResolvedJavaMethod testJavaMethod = runtime.lookupJavaMethod(getMethod("foo"));
+        final StructuredGraph graph = parse("otherFoo");
+        final HotSpotNmethod nmethod = (HotSpotNmethod) getCode(testJavaMethod, graph);
+        Assert.assertTrue(nmethod.isValid());
+        Object result;
+        try {
+            result = nmethod.execute("a", "b", "c");
+            assertEquals(43, result);
+        } catch (InvalidInstalledCodeException e) {
+            Assert.fail("Code was invalidated");
+        }
+        Assert.assertTrue(nmethod.isValid());
+        nmethod.invalidate();
+        Assert.assertFalse(nmethod.isValid());
+        try {
+            result = nmethod.execute(null, null, null);
+            Assert.fail("Code was not invalidated");
+        } catch (InvalidInstalledCodeException e) {
+        }
+        Assert.assertFalse(nmethod.isValid());
+    }
+
+    @Test
+    public void testInstalledCodeCalledFromCompiledCode() {
+        final ResolvedJavaMethod testJavaMethod = runtime.lookupJavaMethod(getMethod("foo"));
+        final StructuredGraph graph = parse("otherFoo");
+        final HotSpotNmethod nmethod = (HotSpotNmethod) getCode(testJavaMethod, graph);
+        Assert.assertTrue(nmethod.isValid());
+        try {
+            for (int i = 0; i < ITERATION_COUNT; ++i) {
+                nmethod.execute("a", "b", "c");
+            }
+        } catch (InvalidInstalledCodeException e) {
+            Assert.fail("Code was invalidated");
+        }
+    }
+
+    @SuppressWarnings("unused")
+    public static Object foo(Object a1, Object a2, Object a3) {
+        return 42;
+    }
+
+    @SuppressWarnings("unused")
+    public static Object otherFoo(Object a1, Object a2, Object a3) {
+        return 43;
+    }
+}
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCompilationResult.java
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCompilationResult.java	Tue May 14 10:17:06 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot;
-
-import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
-
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.code.CompilationResult.Call;
-import com.oracle.graal.api.code.CompilationResult.DataPatch;
-import com.oracle.graal.api.code.CompilationResult.ExceptionHandler;
-import com.oracle.graal.api.code.CompilationResult.Infopoint;
-import com.oracle.graal.api.code.CompilationResult.Mark;
-import com.oracle.graal.api.code.CompilationResult.Site;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.hotspot.meta.*;
-import com.oracle.graal.hotspot.stubs.*;
-
-/**
- * Augments a {@link CompilationResult} with HotSpot-specific information.
- */
-public final class HotSpotCompilationResult extends CompilerObject {
-
-    private static final long serialVersionUID = 7807321392203253218L;
-    public final CompilationResult comp;
-
-    /**
-     * Non-null for installation of an nmethod.
-     */
-    public final HotSpotResolvedJavaMethod method;
-    public final int entryBCI;
-
-    /**
-     * Non-null for installation of a RuntimeStub.
-     */
-    public final String stubName;
-
-    public final Site[] sites;
-    public final ExceptionHandler[] exceptionHandlers;
-
-    public HotSpotCompilationResult(HotSpotResolvedJavaMethod method, int entryBCI, CompilationResult comp) {
-        this.method = method;
-        this.stubName = null;
-        this.comp = comp;
-        this.entryBCI = entryBCI;
-
-        sites = getSortedSites(comp);
-        if (comp.getExceptionHandlers().isEmpty()) {
-            exceptionHandlers = null;
-        } else {
-            exceptionHandlers = comp.getExceptionHandlers().toArray(new ExceptionHandler[comp.getExceptionHandlers().size()]);
-        }
-    }
-
-    public HotSpotCompilationResult(Stub stub, CompilationResult comp) {
-        assert checkStubInvariants(comp);
-        this.method = null;
-        this.stubName = stub.toString();
-        this.comp = comp;
-        this.entryBCI = 0;
-
-        sites = getSortedSites(comp);
-        assert comp.getExceptionHandlers().isEmpty();
-        exceptionHandlers = null;
-    }
-
-    /**
-     * Checks the conditions a compilation must satisfy to be installed as a RuntimeStub.
-     */
-    private boolean checkStubInvariants(CompilationResult compResult) {
-        for (DataPatch data : compResult.getDataReferences()) {
-            Constant constant = data.constant;
-            assert constant.getKind() != Kind.Object : this + " cannot have embedded object constant: " + constant;
-            assert constant.getPrimitiveAnnotation() == null : this + " cannot have embedded metadata: " + constant;
-        }
-        for (Infopoint infopoint : compResult.getInfopoints()) {
-            assert infopoint instanceof Call : this + " cannot have non-call infopoint: " + infopoint;
-            Call call = (Call) infopoint;
-            assert call.target instanceof HotSpotRuntimeCallTarget : this + " cannot have non runtime call: " + call.target;
-            HotSpotRuntimeCallTarget callTarget = (HotSpotRuntimeCallTarget) call.target;
-            assert callTarget.getAddress() == graalRuntime().getConfig().uncommonTrapStub || callTarget.isCRuntimeCall() : this + "must only call C runtime or deoptimization stub, not " + call.target;
-        }
-        return true;
-    }
-
-    static class SiteComparator implements Comparator<Site> {
-
-        public int compare(Site s1, Site s2) {
-            if (s1.pcOffset == s2.pcOffset && (s1 instanceof Mark ^ s2 instanceof Mark)) {
-                return s1 instanceof Mark ? -1 : 1;
-            }
-            return s1.pcOffset - s2.pcOffset;
-        }
-    }
-
-    private static Site[] getSortedSites(CompilationResult target) {
-        List[] lists = new List[]{target.getInfopoints(), target.getDataReferences(), target.getMarks()};
-        int count = 0;
-        for (List list : lists) {
-            count += list.size();
-        }
-        Site[] result = new Site[count];
-        int pos = 0;
-        for (List list : lists) {
-            for (Object elem : list) {
-                result[pos++] = (Site) elem;
-            }
-        }
-        Arrays.sort(result, new SiteComparator());
-        return result;
-    }
-}
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCompiledCode.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCompiledCode.java	Tue May 14 10:18:31 2013 +0200
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot;
+
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.code.CompilationResult.ExceptionHandler;
+import com.oracle.graal.api.code.CompilationResult.Mark;
+import com.oracle.graal.api.code.CompilationResult.Site;
+
+/**
+ * A {@link CompilationResult} with additional HotSpot-specific information required for installing
+ * the code in HotSpot's code cache.
+ */
+public abstract class HotSpotCompiledCode extends CompilerObject {
+
+    private static final long serialVersionUID = 7807321392203253218L;
+    public final CompilationResult comp;
+
+    public final Site[] sites;
+    public final ExceptionHandler[] exceptionHandlers;
+
+    public HotSpotCompiledCode(CompilationResult compResult) {
+        this.comp = compResult;
+        sites = getSortedSites(compResult);
+        if (compResult.getExceptionHandlers().isEmpty()) {
+            exceptionHandlers = null;
+        } else {
+            exceptionHandlers = compResult.getExceptionHandlers().toArray(new ExceptionHandler[compResult.getExceptionHandlers().size()]);
+        }
+    }
+
+    static class SiteComparator implements Comparator<Site> {
+
+        public int compare(Site s1, Site s2) {
+            if (s1.pcOffset == s2.pcOffset && (s1 instanceof Mark ^ s2 instanceof Mark)) {
+                return s1 instanceof Mark ? -1 : 1;
+            }
+            return s1.pcOffset - s2.pcOffset;
+        }
+    }
+
+    private static Site[] getSortedSites(CompilationResult target) {
+        List[] lists = new List[]{target.getInfopoints(), target.getDataReferences(), target.getMarks()};
+        int count = 0;
+        for (List list : lists) {
+            count += list.size();
+        }
+        Site[] result = new Site[count];
+        int pos = 0;
+        for (List list : lists) {
+            for (Object elem : list) {
+                result[pos++] = (Site) elem;
+            }
+        }
+        Arrays.sort(result, new SiteComparator());
+        return result;
+    }
+}
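getSortedSites flattens infopoints, data references, and marks into one array ordered by pcOffset, and the comparator additionally places a Mark ahead of any other site at the same offset. A runnable sketch of that ordering rule, using simplified hypothetical Site/Mark stand-ins rather than the CompilationResult inner classes:

// Hypothetical, simplified stand-ins for CompilationResult.Site/Mark.
import java.util.Arrays;
import java.util.Comparator;

final class SiteOrderSketch {
    static class Site {
        final int pcOffset;
        final String label;

        Site(int pcOffset, String label) {
            this.pcOffset = pcOffset;
            this.label = label;
        }
    }

    static class Mark extends Site {
        Mark(int pcOffset, String label) {
            super(pcOffset, label);
        }
    }

    public static void main(String[] args) {
        Site[] sites = {new Site(8, "infopoint@8"), new Mark(8, "mark@8"), new Site(4, "data@4")};
        Arrays.sort(sites, new Comparator<Site>() {
            public int compare(Site s1, Site s2) {
                // Same comparison rule as SiteComparator above: at equal pcOffset,
                // a Mark sorts before a non-Mark site.
                if (s1.pcOffset == s2.pcOffset && (s1 instanceof Mark ^ s2 instanceof Mark)) {
                    return s1 instanceof Mark ? -1 : 1;
                }
                return s1.pcOffset - s2.pcOffset;
            }
        });
        for (Site s : sites) {
            System.out.println(s.label); // data@4, mark@8, infopoint@8
        }
    }
}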
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCompiledNmethod.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCompiledNmethod.java	Tue May 14 10:18:31 2013 +0200
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.hotspot.meta.*;
+
+/**
+ * {@link HotSpotCompiledCode} destined for installation as an nmethod.
+ */
+public final class HotSpotCompiledNmethod extends HotSpotCompiledCode {
+
+    private static final long serialVersionUID = 1492412603674834024L;
+    public final HotSpotResolvedJavaMethod method;
+    public final int entryBCI;
+
+    public HotSpotCompiledNmethod(HotSpotResolvedJavaMethod method, int entryBCI, CompilationResult compResult) {
+        super(compResult);
+        this.method = method;
+        this.entryBCI = entryBCI;
+    }
+}
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCompiledRuntimeStub.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCompiledRuntimeStub.java	Tue May 14 10:18:31 2013 +0200
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot;
+
+import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.code.CompilationResult.Call;
+import com.oracle.graal.api.code.CompilationResult.DataPatch;
+import com.oracle.graal.api.code.CompilationResult.Infopoint;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.hotspot.stubs.*;
+
+/**
+ * {@link HotSpotCompiledCode} destined for installation as a RuntimeStub.
+ */
+public final class HotSpotCompiledRuntimeStub extends HotSpotCompiledCode {
+
+    private static final long serialVersionUID = -4506206868419153274L;
+
+    public final String stubName;
+
+    public HotSpotCompiledRuntimeStub(Stub stub, CompilationResult compResult) {
+        super(compResult);
+        assert checkStubInvariants(compResult);
+        this.stubName = stub.toString();
+    }
+
+    /**
+     * Checks the conditions a compilation must satisfy to be installed as a RuntimeStub.
+     */
+    private boolean checkStubInvariants(CompilationResult compResult) {
+        assert compResult.getExceptionHandlers().isEmpty();
+        for (DataPatch data : compResult.getDataReferences()) {
+            Constant constant = data.constant;
+            assert constant.getKind() != Kind.Object : this + " cannot have embedded object constant: " + constant;
+            assert constant.getPrimitiveAnnotation() == null : this + " cannot have embedded metadata: " + constant;
+        }
+        for (Infopoint infopoint : compResult.getInfopoints()) {
+            assert infopoint instanceof Call : this + " cannot have non-call infopoint: " + infopoint;
+            Call call = (Call) infopoint;
+            assert call.target instanceof HotSpotRuntimeCallTarget : this + " cannot have non runtime call: " + call.target;
+            HotSpotRuntimeCallTarget callTarget = (HotSpotRuntimeCallTarget) call.target;
+            assert callTarget.getAddress() == graalRuntime().getConfig().uncommonTrapStub || callTarget.isCRuntimeCall() : this + " must only call C runtime or deoptimization stub, not " + call.target;
+        }
+        return true;
+    }
+}
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVM.java
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVM.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVM.java	Tue May 14 10:18:31 2013 +0200
@@ -150,11 +150,11 @@
 
     /**
     * Installs the result of a compilation into the code cache.
     *
-     * @param compResult the result of a compilation
+     * @param compiledCode the result of a compilation
     * @param code the details of the installed CodeBlob are written to this object
     * @return the outcome of the installation as a {@link CodeInstallResult}.
     */
-    CodeInstallResult installCode(HotSpotCompilationResult compResult, HotSpotInstalledCode code, SpeculationLog cache);
+    CodeInstallResult installCode(HotSpotCompiledCode compiledCode, HotSpotInstalledCode code, SpeculationLog cache);
 
     void initializeConfiguration(HotSpotVMConfig config);
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVMImpl.java
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVMImpl.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVMImpl.java	Tue May 14 10:18:31 2013 +0200
@@ -35,11 +35,11 @@
  */
 public class CompilerToVMImpl implements CompilerToVM {
 
-    private native int installCode0(HotSpotCompilationResult comp, HotSpotInstalledCode code, boolean[] triggeredDeoptimizations);
+    private native int installCode0(HotSpotCompiledCode compiledCode, HotSpotInstalledCode code, boolean[] triggeredDeoptimizations);
 
     @Override
-    public CodeInstallResult installCode(HotSpotCompilationResult comp, HotSpotInstalledCode code, SpeculationLog speculationLog) {
-        return CodeInstallResult.values()[installCode0(comp, code, (speculationLog == null) ? null : speculationLog.getRawMap())];
+    public CodeInstallResult installCode(HotSpotCompiledCode compiledCode, HotSpotInstalledCode code, SpeculationLog speculationLog) {
+        return CodeInstallResult.values()[installCode0(compiledCode, code, (speculationLog == null) ? null : speculationLog.getRawMap())];
     }
 
     @Override
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotInstalledCode.java
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotInstalledCode.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotInstalledCode.java	Tue May 14 10:18:31 2013 +0200
@@ -24,123 +24,30 @@
 
 import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
 
-import java.lang.reflect.*;
-
 import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.graph.*;
 import com.oracle.graal.hotspot.*;
-import com.oracle.graal.hotspot.stubs.*;
 
 /**
- * Implementation of {@link InstalledCode} for HotSpot. If the code is installed as an nmethod (as
- * opposed to some other subclass of CodeBlob such as RuntimeStub), then the nmethod stores a weak
- * reference to an instance of this class. This is necessary to keep the nmethod from being unloaded
- * while the associated {@link HotSpotInstalledCode} instance is alive.
- * <p>
- * Note that there is no (current) way for the reference from an nmethod to a
- * {@link HotSpotInstalledCode} instance to be anything but weak. This is due to the fact that
- * HotSpot does not treat nmethods as strong GC roots.
+ * Implementation of {@link InstalledCode} for HotSpot.
  */
-public class HotSpotInstalledCode extends CompilerObject implements InstalledCode {
+public abstract class HotSpotInstalledCode extends CompilerObject implements InstalledCode {
 
     private static final long serialVersionUID = 156632908220561612L;
 
-    private final HotSpotResolvedJavaMethod method;
-    private final Stub stub;
-    private final boolean isDefault;
-    private final Graph graph;
     long codeBlob;
     long start;
 
-    public HotSpotInstalledCode(HotSpotResolvedJavaMethod method, Graph graph, boolean isDefault) {
-        this.method = method;
-        this.stub = null;
-        this.graph = graph;
-        this.isDefault = isDefault;
-    }
-
-    public HotSpotInstalledCode(Stub stub) {
-        this.method = null;
-        this.stub = stub;
-        this.graph = null;
-        this.isDefault = false;
-    }
-
-    public boolean isDefault() {
-        return isDefault;
-    }
-
     public long getCodeBlob() {
         return codeBlob;
     }
 
-    public Graph getGraph() {
-        return graph;
-    }
-
-    @Override
-    public ResolvedJavaMethod getMethod() {
-        return method;
-    }
-
-    @Override
-    public boolean isValid() {
-        return stub != null || graalRuntime().getCompilerToVM().isInstalledCodeValid(codeBlob);
-    }
+    @Override
+    public abstract String toString();
 
-    @Override
-    public void invalidate() {
-        if (stub == null) {
-            graalRuntime().getCompilerToVM().invalidateInstalledCode(codeBlob);
-        }
-    }
-
-    @Override
-    public String toString() {
-        if (stub != null) {
-            return String.format("InstalledCode[stub=%s, codeBlob=0x%x]", stub, codeBlob);
-        }
-        return String.format("InstalledCode[method=%s, codeBlob=0x%x, isDefault=%b]", method, codeBlob, isDefault);
+    public long getStart() {
+        return start;
     }
 
-    @Override
-    public Object execute(Object arg1, Object arg2, Object arg3) throws InvalidInstalledCodeException {
-        assert stub == null;
-        assert method.getSignature().getParameterCount(!Modifier.isStatic(method.getModifiers())) == 3;
-        assert method.getSignature().getParameterKind(0) == Kind.Object;
-        assert method.getSignature().getParameterKind(1) == Kind.Object;
-        assert !Modifier.isStatic(method.getModifiers()) || method.getSignature().getParameterKind(2) == Kind.Object;
-        return graalRuntime().getCompilerToVM().executeCompiledMethod(arg1, arg2, arg3, codeBlob);
-    }
-
-    private boolean checkArgs(Object... args) {
-        JavaType[] sig = MetaUtil.signatureToTypes(method);
-        assert args.length == sig.length : MetaUtil.format("%H.%n(%p): expected ", method) + sig.length + " args, got " + args.length;
-        for (int i = 0; i < sig.length; i++) {
-            Object arg = args[i];
-            if (arg == null) {
-                assert sig[i].getKind() == Kind.Object : MetaUtil.format("%H.%n(%p): expected arg ", method) + i + " to be Object, not " + sig[i];
-            } else if (sig[i].getKind() != Kind.Object) {
-                assert sig[i].getKind().toBoxedJavaClass() == arg.getClass() : MetaUtil.format("%H.%n(%p): expected arg ", method) + i + " to be " + sig[i] + ", not " + arg.getClass();
-            }
-        }
-        return true;
-    }
-
-    @Override
-    public Object executeVarargs(Object... args) throws InvalidInstalledCodeException {
-        assert stub == null;
-        assert checkArgs(args);
-        return graalRuntime().getCompilerToVM().executeCompiledMethodVarargs(args, codeBlob);
-    }
-
-    @Override
-    public long getStart() {
-        return isValid() ? start : 0;
-    }
-
-    @Override
     public byte[] getCode() {
         return graalRuntime().getCompilerToVM().getCode(codeBlob);
     }
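After this hunk, HotSpotInstalledCode is a pure base class: it keeps only the VM-written codeBlob and start fields, and everything method- or stub-specific moves into the two subclasses added elsewhere in this patch. A skeletal sketch of the resulting shape (hypothetical names mirror the patched classes; bodies are elided):

// Hypothetical skeleton of the refactored hierarchy.
abstract class InstalledCodeSketch {          // stands in for HotSpotInstalledCode
    long codeBlob;                            // raw CodeBlob pointer, written by the VM
    long start;

    public abstract String toString();        // each flavor prints itself

    public static void main(String[] args) {
        System.out.println(new NmethodSketch());
        System.out.println(new RuntimeStubSketch());
    }
}

class NmethodSketch extends InstalledCodeSketch {      // HotSpotNmethod: invalidatable and
    @Override                                          // executable, tied to a Java method
    public String toString() {
        return "nmethod";
    }
}

class RuntimeStubSketch extends InstalledCodeSketch {  // HotSpotRuntimeStub: always valid,
    @Override                                          // never callable directly from Java
    public String toString() {
        return "runtime stub";
    }
}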
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotNmethod.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotNmethod.java	Tue May 14 10:18:31 2013 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.meta;
+
+import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
+
+import java.lang.reflect.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graph.*;
+
+/**
+ * Implementation of {@link InstalledCode} for code installed as an nmethod. The nmethod stores a
+ * weak reference to an instance of this class. This is necessary to keep the nmethod from being
+ * unloaded while the associated {@link HotSpotNmethod} instance is alive.
+ * <p>
+ * Note that there is no (current) way for the reference from an nmethod to a {@link HotSpotNmethod}
+ * instance to be anything but weak. This is because HotSpot does not treat nmethods as
+ * strong GC roots.
+ */
+public class HotSpotNmethod extends HotSpotInstalledCode {
+
+    private static final long serialVersionUID = -1784683588947054103L;
+
+    private final HotSpotResolvedJavaMethod method;
+    private final boolean isDefault;
+    private final Graph graph;
+
+    public HotSpotNmethod(HotSpotResolvedJavaMethod method, Graph graph, boolean isDefault) {
+        this.method = method;
+        this.graph = graph;
+        this.isDefault = isDefault;
+    }
+
+    public boolean isDefault() {
+        return isDefault;
+    }
+
+    public Graph getGraph() {
+        return graph;
+    }
+
+    @Override
+    public ResolvedJavaMethod getMethod() {
+        return method;
+    }
+
+    @Override
+    public boolean isValid() {
+        return graalRuntime().getCompilerToVM().isInstalledCodeValid(codeBlob);
+    }
+
+    @Override
+    public void invalidate() {
+        graalRuntime().getCompilerToVM().invalidateInstalledCode(codeBlob);
+    }
+
+    @Override
+    public String toString() {
+        return String.format("InstalledNmethod[method=%s, codeBlob=0x%x, isDefault=%b]", method, codeBlob, isDefault);
+    }
+
+    @Override
+    public Object execute(Object arg1, Object arg2, Object arg3) throws InvalidInstalledCodeException {
+        assert method.getSignature().getParameterCount(!Modifier.isStatic(method.getModifiers())) == 3;
+        assert method.getSignature().getParameterKind(0) == Kind.Object;
+        assert method.getSignature().getParameterKind(1) == Kind.Object;
+        assert !Modifier.isStatic(method.getModifiers()) || method.getSignature().getParameterKind(2) == Kind.Object;
+        return graalRuntime().getCompilerToVM().executeCompiledMethod(arg1, arg2, arg3, codeBlob);
+    }
+
+    private boolean checkArgs(Object... args) {
+        JavaType[] sig = MetaUtil.signatureToTypes(method);
+        assert args.length == sig.length : MetaUtil.format("%H.%n(%p): expected ", method) + sig.length + " args, got " + args.length;
+        for (int i = 0; i < sig.length; i++) {
+            Object arg = args[i];
+            if (arg == null) {
+                assert sig[i].getKind() == Kind.Object : MetaUtil.format("%H.%n(%p): expected arg ", method) + i + " to be Object, not " + sig[i];
+            } else if (sig[i].getKind() != Kind.Object) {
+                assert sig[i].getKind().toBoxedJavaClass() == arg.getClass() : MetaUtil.format("%H.%n(%p): expected arg ", method) + i + " to be " + sig[i] + ", not " + arg.getClass();
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public Object executeVarargs(Object... args) throws InvalidInstalledCodeException {
+        assert checkArgs(args);
+        return graalRuntime().getCompilerToVM().executeCompiledMethodVarargs(args, codeBlob);
+    }
+
+    @Override
+    public long getStart() {
+        return isValid() ? start : 0;
+    }
+}
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Tue May 14 10:17:06 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Tue May 14 10:18:31 2013 +0200
@@ -42,19 +42,14 @@
 import static com.oracle.graal.hotspot.nodes.WriteBarrierPreStubCall.*;
 import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.IDENTITY_HASHCODE;
 import static com.oracle.graal.hotspot.replacements.SystemSubstitutions.*;
-import static com.oracle.graal.hotspot.stubs.CreateNullPointerExceptionStub.*;
-import static com.oracle.graal.hotspot.stubs.CreateOutOfBoundsExceptionStub.*;
 import static com.oracle.graal.hotspot.stubs.ExceptionHandlerStub.*;
 import static com.oracle.graal.hotspot.stubs.LogObjectStub.*;
 import static com.oracle.graal.hotspot.stubs.LogPrimitiveStub.*;
 import static com.oracle.graal.hotspot.stubs.LogPrintfStub.*;
-import static com.oracle.graal.hotspot.stubs.MonitorEnterStub.*;
-import static com.oracle.graal.hotspot.stubs.MonitorExitStub.*;
 import static com.oracle.graal.hotspot.stubs.NewArrayStub.*;
 import static com.oracle.graal.hotspot.stubs.NewInstanceStub.*;
 import static com.oracle.graal.hotspot.stubs.NewMultiArrayStub.*;
 import static com.oracle.graal.hotspot.stubs.OSRMigrationEndStub.*;
-import static com.oracle.graal.hotspot.stubs.RegisterFinalizerStub.*;
 import static com.oracle.graal.hotspot.stubs.StubUtil.*;
 import static com.oracle.graal.hotspot.stubs.ThreadIsInterruptedStub.*;
 import static com.oracle.graal.hotspot.stubs.UnwindExceptionToCallerStub.*;
@@ -255,14 +250,6 @@
                 /* ret */ ret(word),
                 /* arg0: thread */ nativeCallingConvention(word,
                 /* arg1: returnAddress */ word));
 
-        addStubCall(REGISTER_FINALIZER,
-                /* ret */ ret(Kind.Void),
-                /* arg0: object */ javaCallingConvention(Kind.Object));
-
-        addCRuntimeCall(REGISTER_FINALIZER_C, config.registerFinalizerAddress,
-                /* ret */ ret(Kind.Void),
-                /* arg0: thread */ nativeCallingConvention(word,
-                /* arg1: object */ Kind.Object));
 
         addStubCall(NEW_ARRAY,
                 /* ret */ ret(Kind.Object),
@@ -386,44 +373,6 @@
                 /* temps */ null,
                 /* ret */ ret(Kind.Void));
 
-        addStubCall(MONITORENTER,
-                /* ret */ ret(Kind.Void),
-                /* arg0: object */ javaCallingConvention(Kind.Object,
-                /* arg1: lock */ word));
-
-        addCRuntimeCall(MONITORENTER_C, config.monitorenterAddress,
-                /* ret */ ret(Kind.Void),
-                /* arg0: thread */ nativeCallingConvention(word,
-                /* arg1: object */ Kind.Object,
-                /* arg1: lock */ word));
-
-        addStubCall(MONITOREXIT,
-                /* ret */ ret(Kind.Void),
-                /* arg0: object */ javaCallingConvention(Kind.Object,
-                /* arg1: lock */ word));
-
-        addCRuntimeCall(MONITOREXIT_C, config.monitorexitAddress,
-                /* ret */ ret(Kind.Void),
-                /* arg0: thread */ nativeCallingConvention(word,
-                /* arg1: object */ Kind.Object,
-                /* arg1: lock */ word));
-
-        addStubCall(CREATE_NULL_POINTER_EXCEPTION,
-                /* ret */ ret(Kind.Object));
-
-        addCRuntimeCall(CREATE_NULL_POINTER_EXCEPTION_C, config.createNullPointerExceptionAddress,
-                /* ret */ ret(Kind.Void),
-                /* arg0: thread */ nativeCallingConvention(word));
-
-        addStubCall(CREATE_OUT_OF_BOUNDS_EXCEPTION,
-                /* ret */ ret(Kind.Object),
-                /* arg0: index */ javaCallingConvention(Kind.Int));
-
-        addCRuntimeCall(CREATE_OUT_OF_BOUNDS_C, config.createOutOfBoundsExceptionAddress,
-                /* ret */ ret(Kind.Void),
-                /* arg0: thread */ nativeCallingConvention(word,
-                /* arg1: index */ Kind.Int));
-
         addStubCall(VM_ERROR,
                 /* ret */ ret(Kind.Void),
                 /* arg0: where */ javaCallingConvention(Kind.Object,
@@ -570,16 +519,11 @@
         link(new NewInstanceStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(NEW_INSTANCE)));
         link(new NewArrayStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(NEW_ARRAY)));
         link(new NewMultiArrayStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(NEW_MULTI_ARRAY)));
-        link(new RegisterFinalizerStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(REGISTER_FINALIZER)));
         link(new ThreadIsInterruptedStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(THREAD_IS_INTERRUPTED)));
         link(new ExceptionHandlerStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(EXCEPTION_HANDLER)));
         link(new UnwindExceptionToCallerStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(UNWIND_EXCEPTION_TO_CALLER)));
         link(new VerifyOopStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(VERIFY_OOP)));
         link(new OSRMigrationEndStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(OSR_MIGRATION_END)));
-        link(new MonitorEnterStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(MONITORENTER)));
-        link(new MonitorExitStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(MONITOREXIT)));
-        link(new CreateNullPointerExceptionStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(CREATE_NULL_POINTER_EXCEPTION)));
-        link(new CreateOutOfBoundsExceptionStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(CREATE_OUT_OF_BOUNDS_EXCEPTION)));
         link(new LogPrimitiveStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(LOG_PRIMITIVE)));
         link(new LogObjectStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(LOG_OBJECT)));
         link(new LogPrintfStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(LOG_PRINTF)));
@@ -587,15 +531,20 @@
         link(new WriteBarrierPreStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(WRITE_BARRIER_PRE)));
         link(new WriteBarrierPostStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(WRITE_BARRIER_POST)));
 
-        CompilerToVM c2vm = graalRuntime.getCompilerToVM();
-        link(new RuntimeCallStub(config.identityHashCodeAddress, IDENTITY_HASHCODE, true, this, replacements, globalStubRegConfig, c2vm));
+        linkRuntimeCall(IDENTITY_HASHCODE, config.identityHashCodeAddress, replacements);
+        linkRuntimeCall(REGISTER_FINALIZER, config.registerFinalizerAddress, replacements);
+        linkRuntimeCall(CREATE_NULL_POINTER_EXCEPTION, config.createNullPointerExceptionAddress, replacements);
+        linkRuntimeCall(CREATE_OUT_OF_BOUNDS_EXCEPTION, config.createOutOfBoundsExceptionAddress, replacements);
+        linkRuntimeCall(MONITORENTER, config.monitorenterAddress, replacements);
+        linkRuntimeCall(MONITOREXIT, config.monitorexitAddress, replacements);
     }
 
     private static void link(Stub stub) {
         stub.getLinkage().setStub(stub);
     }
 
-    private void link(RuntimeCallStub stub) {
+    private void linkRuntimeCall(Descriptor descriptor, long address, Replacements replacements) {
+        RuntimeCallStub stub = new RuntimeCallStub(address, descriptor, true, this, replacements, globalStubRegConfig, graalRuntime.getCompilerToVM());
        HotSpotRuntimeCallTarget linkage = stub.getLinkage();
        HotSpotRuntimeCallTarget targetLinkage = stub.getTargetLinkage();
        linkage.setStub(stub);
@@ -1158,8 +1107,8 @@
     }
 
     public HotSpotInstalledCode installMethod(HotSpotResolvedJavaMethod method, Graph graph, int entryBCI, CompilationResult compResult) {
-        HotSpotInstalledCode installedCode = new HotSpotInstalledCode(method, graph, true);
-        graalRuntime.getCompilerToVM().installCode(new HotSpotCompilationResult(method, entryBCI, compResult), installedCode, method.getSpeculationLog());
+        HotSpotInstalledCode installedCode = new HotSpotNmethod(method, graph, true);
+        graalRuntime.getCompilerToVM().installCode(new HotSpotCompiledNmethod(method, entryBCI, compResult), installedCode, method.getSpeculationLog());
         return installedCode;
     }
 
@@ -1171,8 +1120,8 @@
     @Override
     public InstalledCode addMethod(ResolvedJavaMethod method, CompilationResult compResult, Graph graph) {
         HotSpotResolvedJavaMethod hotspotMethod = (HotSpotResolvedJavaMethod) method;
-        HotSpotInstalledCode code = new HotSpotInstalledCode(hotspotMethod, graph, false);
-        CodeInstallResult result = graalRuntime.getCompilerToVM().installCode(new HotSpotCompilationResult(hotspotMethod, -1, compResult), code, null);
+        HotSpotInstalledCode code = new HotSpotNmethod(hotspotMethod, graph, false);
+        CodeInstallResult result = graalRuntime.getCompilerToVM().installCode(new HotSpotCompiledNmethod(hotspotMethod, -1, compResult), code, null);
         if (result != CodeInstallResult.OK) {
             return null;
         }
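The HotSpotRuntime hunks above replace four hand-written stub classes (register-finalizer, monitorenter/exit, and the two exception-creation stubs) with calls to the new linkRuntimeCall helper, which manufactures a RuntimeCallStub from a descriptor plus a C entry-point address and wires both linkages. A toy sketch of that consolidation idea, with a hypothetical RuntimeCallRegistry, descriptors as plain strings, and dummy addresses:

// Hypothetical sketch of data-driven runtime-call linking.
import java.util.HashMap;
import java.util.Map;

final class RuntimeCallRegistry {
    static final class StubSketch {
        final String descriptor;
        final long address;

        StubSketch(String descriptor, long address) {
            this.descriptor = descriptor;
            this.address = address;
        }
    }

    private final Map<String, StubSketch> calls = new HashMap<>();

    // One generic helper replaces one hand-maintained class per entry point.
    void linkRuntimeCall(String descriptor, long address) {
        calls.put(descriptor, new StubSketch(descriptor, address));
    }

    public static void main(String[] args) {
        RuntimeCallRegistry r = new RuntimeCallRegistry();
        r.linkRuntimeCall("identity_hashcode", 0x1000L);
        r.linkRuntimeCall("register_finalizer", 0x2000L);
        r.linkRuntimeCall("monitorenter", 0x3000L);
        System.out.println(r.calls.size() + " runtime calls linked");
    }
}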
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntimeStub.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntimeStub.java	Tue May 14 10:18:31 2013 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.meta;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.hotspot.stubs.*;
+
+/**
+ * Implementation of {@link InstalledCode} for code installed as a RuntimeStub.
+ */
+public class HotSpotRuntimeStub extends HotSpotInstalledCode {
+
+    private static final long serialVersionUID = -6388648408298441748L;
+
+    private final Stub stub;
+
+    public HotSpotRuntimeStub(Stub stub) {
+        this.stub = stub;
+    }
+
+    public ResolvedJavaMethod getMethod() {
+        return null;
+    }
+
+    public boolean isValid() {
+        return true;
+    }
+
+    public void invalidate() {
+    }
+
+    @Override
+    public String toString() {
+        return String.format("InstalledRuntimeStub[stub=%s, codeBlob=0x%x]", stub, codeBlob);
+    }
+
+    public Object execute(Object arg1, Object arg2, Object arg3) throws InvalidInstalledCodeException {
+        throw new GraalInternalError("Cannot call stub %s", stub);
+    }
+
+    public Object executeVarargs(Object... args) throws InvalidInstalledCodeException {
+        throw new GraalInternalError("Cannot call stub %s", stub);
+    }
+}
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/HotSpotInstalledCodeExecuteNode.java
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/HotSpotInstalledCodeExecuteNode.java	Tue May 14 10:17:06 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.nodes;
-
-import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.hotspot.meta.*;
-import com.oracle.graal.hotspot.replacements.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.extended.*;
-import com.oracle.graal.nodes.extended.LocationNode.LocationIdentity;
-import com.oracle.graal.nodes.java.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.graal.nodes.type.*;
-import com.oracle.graal.phases.common.*;
-
-public class HotSpotInstalledCodeExecuteNode extends AbstractCallNode implements Lowerable {
-
-    @Input private final ValueNode code;
-    private final Class[] signature;
-
-    public HotSpotInstalledCodeExecuteNode(Kind kind, Class[] signature, ValueNode code, ValueNode arg1, ValueNode arg2, ValueNode arg3) {
-        super(StampFactory.forKind(kind), new ValueNode[]{arg1, arg2, arg3});
-        this.code = code;
-        this.signature = signature;
-    }
-
-    @Override
-    public LocationIdentity[] getLocationIdentities() {
-        return new LocationIdentity[]{LocationNode.ANY_LOCATION};
-    }
-
-    @Override
-    public void lower(LoweringTool tool, LoweringType loweringType) {
-        if (code.isConstant() && code.asConstant().asObject() instanceof HotSpotInstalledCode) {
-            HotSpotInstalledCode hsCode = (HotSpotInstalledCode) code.asConstant().asObject();
-            InvokeNode invoke = replaceWithInvoke(tool.getRuntime());
-            StructuredGraph graph = (StructuredGraph) hsCode.getGraph();
-            if (graph != null) {
-                InliningUtil.inline(invoke, (StructuredGraph) hsCode.getGraph(), false);
-            }
-        } else {
-            replaceWithInvoke(tool.getRuntime());
-        }
-    }
-
-    protected InvokeNode replaceWithInvoke(MetaAccessProvider tool) {
-        ResolvedJavaMethod method = null;
-        ResolvedJavaField methodField = null;
-        ResolvedJavaField metaspaceMethodField = null;
-        ResolvedJavaField codeBlobField = null;
-        try {
-            method = tool.lookupJavaMethod(HotSpotInstalledCodeExecuteNode.class.getMethod("placeholder", Object.class, Object.class, Object.class));
-            methodField = tool.lookupJavaField(HotSpotInstalledCode.class.getDeclaredField("method"));
-            codeBlobField = tool.lookupJavaField(HotSpotInstalledCode.class.getDeclaredField("codeBlob"));
-            metaspaceMethodField = tool.lookupJavaField(HotSpotResolvedJavaMethod.class.getDeclaredField("metaspaceMethod"));
-        } catch (NoSuchMethodException | SecurityException | NoSuchFieldException e) {
-            throw new IllegalStateException(e);
-        }
-        ResolvedJavaType[] signatureTypes = new ResolvedJavaType[signature.length];
-        for (int i = 0; i < signature.length; i++) {
-            signatureTypes[i] = tool.lookupJavaType(signature[i]);
-        }
-        final int verifiedEntryPointOffset = HotSpotReplacementsUtil.verifiedEntryPointOffset();
-
-        LoadFieldNode loadCodeBlob = graph().add(new LoadFieldNode(code, codeBlobField));
-        UnsafeLoadNode load = graph().add(new UnsafeLoadNode(loadCodeBlob, verifiedEntryPointOffset, ConstantNode.forLong(0, graph()), graalRuntime().getTarget().wordKind));
-
-        LoadFieldNode loadMethod = graph().add(new LoadFieldNode(code, methodField));
-        LoadFieldNode loadmetaspaceMethod = graph().add(new LoadFieldNode(loadMethod, metaspaceMethodField));
-
-        HotSpotIndirectCallTargetNode callTarget = graph().add(
-                        new HotSpotIndirectCallTargetNode(loadmetaspaceMethod, load, arguments, stamp(), signatureTypes, method, CallingConvention.Type.JavaCall));
-
-        InvokeNode invoke = graph().add(new InvokeNode(callTarget, 0));
-
-        invoke.setStateAfter(stateAfter());
-        graph().replaceFixedWithFixed(this, invoke);
-
-        graph().addBeforeFixed(invoke, loadmetaspaceMethod);
-        graph().addBeforeFixed(loadmetaspaceMethod, loadMethod);
-        graph().addBeforeFixed(invoke, load);
-        graph().addBeforeFixed(load, loadCodeBlob);
-
-        return invoke;
-    }
-
-    public static Object placeholder(@SuppressWarnings("unused") Object a1, @SuppressWarnings("unused") Object a2, @SuppressWarnings("unused") Object a3) {
-        return 1;
-    }
-
-    @NodeIntrinsic
-    public static native <T> T call(@ConstantNodeParameter Kind kind, @ConstantNodeParameter Class[] signature, Object code, Object arg1, Object arg2, Object arg3);
-
-}
diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/HotSpotNmethodExecuteNode.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/HotSpotNmethodExecuteNode.java	Tue May 14 10:18:31 2013 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.nodes;
+
+import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.hotspot.replacements.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.extended.*;
+import com.oracle.graal.nodes.extended.LocationNode.LocationIdentity;
+import com.oracle.graal.nodes.java.*;
+import com.oracle.graal.nodes.spi.*;
+import com.oracle.graal.nodes.type.*;
+import com.oracle.graal.phases.common.*;
+
+public class HotSpotNmethodExecuteNode extends AbstractCallNode implements Lowerable {
+
+    @Input private final ValueNode code;
+    private final Class[] signature;
+
+    public HotSpotNmethodExecuteNode(Kind kind, Class[] signature, ValueNode code, ValueNode arg1, ValueNode arg2, ValueNode arg3) {
+        super(StampFactory.forKind(kind), new ValueNode[]{arg1, arg2, arg3});
+        this.code = code;
+        this.signature = signature;
+    }
+
+    @Override
+    public LocationIdentity[] getLocationIdentities() {
+        return new LocationIdentity[]{LocationNode.ANY_LOCATION};
+    }
+
+    @Override
+    public void lower(LoweringTool tool, LoweringType loweringType) {
+        if (code.isConstant() && code.asConstant().asObject() instanceof HotSpotNmethod) {
+            HotSpotNmethod nmethod = (HotSpotNmethod) code.asConstant().asObject();
+            InvokeNode invoke = replaceWithInvoke(tool.getRuntime());
+            StructuredGraph graph = (StructuredGraph) nmethod.getGraph();
+            if (graph != null) {
+                InliningUtil.inline(invoke, (StructuredGraph) nmethod.getGraph(), false);
+            }
+        } else {
+            replaceWithInvoke(tool.getRuntime());
+        }
+    }
+
+    protected InvokeNode replaceWithInvoke(MetaAccessProvider tool) {
+        ResolvedJavaMethod method = null;
+        ResolvedJavaField methodField = null;
+        ResolvedJavaField metaspaceMethodField = null;
+        ResolvedJavaField codeBlobField = null;
+        try {
+            method = tool.lookupJavaMethod(HotSpotNmethodExecuteNode.class.getMethod("placeholder", Object.class, Object.class, Object.class));
+            methodField = tool.lookupJavaField(HotSpotNmethod.class.getDeclaredField("method"));
+            codeBlobField = tool.lookupJavaField(HotSpotInstalledCode.class.getDeclaredField("codeBlob"));
+            metaspaceMethodField = tool.lookupJavaField(HotSpotResolvedJavaMethod.class.getDeclaredField("metaspaceMethod"));
+        } catch (NoSuchMethodException | SecurityException | NoSuchFieldException e) {
+            throw new IllegalStateException(e);
+        }
+        ResolvedJavaType[] signatureTypes = new ResolvedJavaType[signature.length];
+        for (int i = 0; i < signature.length; i++) {
+            signatureTypes[i] = tool.lookupJavaType(signature[i]);
+        }
+        final int verifiedEntryPointOffset = HotSpotReplacementsUtil.verifiedEntryPointOffset();
+
+        LoadFieldNode loadCodeBlob = graph().add(new LoadFieldNode(code, codeBlobField));
+        UnsafeLoadNode load = graph().add(new UnsafeLoadNode(loadCodeBlob, verifiedEntryPointOffset, ConstantNode.forLong(0, graph()), graalRuntime().getTarget().wordKind));
+
+        LoadFieldNode loadMethod = graph().add(new LoadFieldNode(code, methodField));
+        LoadFieldNode loadmetaspaceMethod = graph().add(new LoadFieldNode(loadMethod, metaspaceMethodField));
+
+        HotSpotIndirectCallTargetNode callTarget = graph().add(
+                        new HotSpotIndirectCallTargetNode(loadmetaspaceMethod, load, arguments, stamp(), signatureTypes, method, CallingConvention.Type.JavaCall));
+
+        InvokeNode invoke = graph().add(new InvokeNode(callTarget, 0));
+
+        invoke.setStateAfter(stateAfter());
+        graph().replaceFixedWithFixed(this, invoke);
+
graph().addBeforeFixed(invoke, loadmetaspaceMethod); + graph().addBeforeFixed(loadmetaspaceMethod, loadMethod); + graph().addBeforeFixed(invoke, load); + graph().addBeforeFixed(load, loadCodeBlob); + + return invoke; + } + + public static Object placeholder(@SuppressWarnings("unused") Object a1, @SuppressWarnings("unused") Object a2, @SuppressWarnings("unused") Object a3) { + return 1; + } + + @NodeIntrinsic + public static native <T> T call(@ConstantNodeParameter Kind kind, @ConstantNodeParameter Class[] signature, Object code, Object arg1, Object arg2, Object arg3); + +} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorEnterStubCall.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorEnterStubCall.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorEnterStubCall.java Tue May 14 10:18:31 2013 +0200 @@ -31,7 +31,7 @@ import com.oracle.graal.word.*; /** - * Node implementing a call to HotSpot's {@code graal_monitorenter} stub. + * Node implementing a call to {@code GraalRuntime::monitorenter}. */ public class MonitorEnterStubCall extends DeoptimizingStubCall implements LIRGenLowerable { @@ -52,5 +52,5 @@ } @NodeIntrinsic - public static native void call(Object hub, Word lock); + public static native void call(Object object, Word lock); } diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorExitStubCall.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorExitStubCall.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/MonitorExitStubCall.java Tue May 14 10:18:31 2013 +0200 @@ -32,7 +32,7 @@ import com.oracle.graal.word.*; /** - * Node implementing a call to HotSpot's {@code graal_monitorexit} stub. + * Node implementing a call to {@code GraalRuntime::monitorexit}. */ public class MonitorExitStubCall extends DeoptimizingStubCall implements LIRGenLowerable { @@ -56,5 +56,5 @@ } @NodeIntrinsic - public static native void call(Object hub, @ConstantNodeParameter int lockDepth); + public static native void call(Object object, @ConstantNodeParameter int lockDepth); } diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotInstalledCodeIntrinsics.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotInstalledCodeIntrinsics.java Tue May 14 10:17:06 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.hotspot.replacements; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.api.runtime.*; -import com.oracle.graal.nodes.spi.*; -import com.oracle.graal.phases.*; - -@ServiceProvider(ReplacementsProvider.class) -public class HotSpotInstalledCodeIntrinsics implements ReplacementsProvider { - - @Override - public void registerReplacements(MetaAccessProvider runtime, Replacements replacements, TargetDescription target) { - if (GraalOptions.IntrinsifyInstalledCodeMethods) { - replacements.registerSubstitutions(HotSpotInstalledCodeSubstitutions.class); - } - } -} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotInstalledCodeSubstitutions.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotInstalledCodeSubstitutions.java Tue May 14 10:17:06 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.hotspot.replacements; - -import com.oracle.graal.api.meta.*; -import com.oracle.graal.api.replacements.*; -import com.oracle.graal.hotspot.meta.*; -import com.oracle.graal.hotspot.nodes.*; -import com.oracle.graal.replacements.Snippet.Fold; - -@ClassSubstitution(HotSpotInstalledCode.class) -public class HotSpotInstalledCodeSubstitutions { - - @MethodSubstitution(isStatic = false) - public static Object execute(HotSpotInstalledCode code, final Object arg1, final Object arg2, final Object arg3) { - return HotSpotInstalledCodeExecuteNode.call(Kind.Object, getSignature(), code, arg1, arg2, arg3); - } - - @Fold - private static Class[] getSignature() { - return new Class[]{Object.class, Object.class, Object.class}; - } - -} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotNmethodIntrinsics.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotNmethodIntrinsics.java Tue May 14 10:18:31 2013 +0200 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.hotspot.replacements; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.api.runtime.*; +import com.oracle.graal.nodes.spi.*; +import com.oracle.graal.phases.*; + +@ServiceProvider(ReplacementsProvider.class) +public class HotSpotNmethodIntrinsics implements ReplacementsProvider { + + @Override + public void registerReplacements(MetaAccessProvider runtime, Replacements replacements, TargetDescription target) { + if (GraalOptions.IntrinsifyInstalledCodeMethods) { + replacements.registerSubstitutions(HotSpotNmethodSubstitutions.class); + } + } +} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotNmethodSubstitutions.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotNmethodSubstitutions.java Tue May 14 10:18:31 2013 +0200 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.hotspot.replacements; + +import com.oracle.graal.api.meta.*; +import com.oracle.graal.api.replacements.*; +import com.oracle.graal.hotspot.meta.*; +import com.oracle.graal.hotspot.nodes.*; +import com.oracle.graal.replacements.Snippet.Fold; + +@ClassSubstitution(HotSpotNmethod.class) +public class HotSpotNmethodSubstitutions { + + @MethodSubstitution(isStatic = false) + public static Object execute(HotSpotInstalledCode code, final Object arg1, final Object arg2, final Object arg3) { + return HotSpotNmethodExecuteNode.call(Kind.Object, getSignature(), code, arg1, arg2, arg3); + } + + @Fold + private static Class[] getSignature() { + return new Class[]{Object.class, Object.class, Object.class}; + } + +} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/CreateNullPointerExceptionStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/CreateNullPointerExceptionStub.java Tue May 14 10:17:06 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.hotspot.stubs; - -import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.code.RuntimeCallTarget.*; -import com.oracle.graal.graph.Node.*; -import com.oracle.graal.hotspot.*; -import com.oracle.graal.hotspot.meta.*; -import com.oracle.graal.hotspot.nodes.*; -import com.oracle.graal.nodes.spi.*; -import com.oracle.graal.replacements.*; -import com.oracle.graal.word.*; - -/** - * Stub called to create a {@link NullPointerException}. 
- */ -public class CreateNullPointerExceptionStub extends CRuntimeStub { - - public CreateNullPointerExceptionStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { - super(runtime, replacements, target, linkage); - } - - @Snippet - private static Object createNullPointerException() { - createNullPointerExceptionC(CREATE_NULL_POINTER_EXCEPTION_C, thread()); - StubUtil.handlePendingException(true); - return StubUtil.verifyObject(getAndClearObjectResult(thread())); - } - - public static final Descriptor CREATE_NULL_POINTER_EXCEPTION_C = StubUtil.descriptorFor(CreateNullPointerExceptionStub.class, "createNullPointerExceptionC", false); - - @NodeIntrinsic(CRuntimeCall.class) - public static native void createNullPointerExceptionC(@ConstantNodeParameter Descriptor createNullPointerExceptionC, Word thread); - -} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/CreateOutOfBoundsExceptionStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/CreateOutOfBoundsExceptionStub.java Tue May 14 10:17:06 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.hotspot.stubs; - -import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.code.RuntimeCallTarget.*; -import com.oracle.graal.graph.Node.*; -import com.oracle.graal.hotspot.*; -import com.oracle.graal.hotspot.meta.*; -import com.oracle.graal.hotspot.nodes.*; -import com.oracle.graal.nodes.spi.*; -import com.oracle.graal.replacements.*; -import com.oracle.graal.word.*; - -/** - * Stub called to create a {@link ArrayIndexOutOfBoundsException}. 
- */ -public class CreateOutOfBoundsExceptionStub extends CRuntimeStub { - - public CreateOutOfBoundsExceptionStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { - super(runtime, replacements, target, linkage); - } - - @Snippet - private static Object createOutOfBoundsException(int index) { - createOutOfBoundsExceptionC(CREATE_OUT_OF_BOUNDS_C, thread(), index); - StubUtil.handlePendingException(true); - return StubUtil.verifyObject(getAndClearObjectResult(thread())); - } - - public static final Descriptor CREATE_OUT_OF_BOUNDS_C = StubUtil.descriptorFor(CreateOutOfBoundsExceptionStub.class, "createOutOfBoundsExceptionC", false); - - @NodeIntrinsic(CRuntimeCall.class) - public static native void createOutOfBoundsExceptionC(@ConstantNodeParameter Descriptor createOutOfBoundsExceptionC, Word thread, int index); - -} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ExceptionHandlerStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ExceptionHandlerStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ExceptionHandlerStub.java Tue May 14 10:18:31 2013 +0200 @@ -25,6 +25,7 @@ import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*; import static com.oracle.graal.hotspot.nodes.PatchReturnAddressNode.*; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -70,11 +71,11 @@ writeExceptionOop(thread(), exception); writeExceptionPc(thread(), exceptionPc); if (logging()) { - StubUtil.printf("handling exception %p (", Word.fromObject(exception).rawValue()); - StubUtil.decipher(Word.fromObject(exception).rawValue()); - StubUtil.printf(") at %p (", Word.fromObject(exception).rawValue(), exceptionPc.rawValue()); - StubUtil.decipher(exceptionPc.rawValue()); - StubUtil.printf(")\n"); + printf("handling exception %p (", Word.fromObject(exception).rawValue()); + decipher(Word.fromObject(exception).rawValue()); + printf(") at %p (", Word.fromObject(exception).rawValue(), exceptionPc.rawValue()); + decipher(exceptionPc.rawValue()); + printf(")\n"); } // patch throwing pc into return address so that deoptimization finds the right debug info @@ -83,9 +84,9 @@ Word handlerPc = exceptionHandlerForPc(EXCEPTION_HANDLER_FOR_PC, thread()); if (logging()) { - StubUtil.printf("handler for exception %p at %p is at %p (", Word.fromObject(exception).rawValue(), exceptionPc.rawValue(), handlerPc.rawValue()); - StubUtil.decipher(handlerPc.rawValue()); - StubUtil.printf(")\n"); + printf("handler for exception %p at %p is at %p (", Word.fromObject(exception).rawValue(), exceptionPc.rawValue(), handlerPc.rawValue()); + decipher(handlerPc.rawValue()); + printf(")\n"); } // patch the return address so that this stub returns to the exception handler @@ -96,18 +97,18 @@ if (enabled) { Object currentException = readExceptionOop(thread()); if (currentException != null) { - StubUtil.fatal("exception object in thread must be null, not %p", Word.fromObject(currentException).rawValue()); + fatal("exception object in thread must be null, not %p", Word.fromObject(currentException).rawValue()); } Word currentExceptionPc = readExceptionPc(thread()); if (currentExceptionPc.notEqual(Word.zero())) { - StubUtil.fatal("exception PC in thread must be 
zero, not %p", currentExceptionPc.rawValue()); + fatal("exception PC in thread must be zero, not %p", currentExceptionPc.rawValue()); } } } static void checkExceptionNotNull(boolean enabled, Object exception) { if (enabled && exception == null) { - StubUtil.fatal("exception must not be null"); + fatal("exception must not be null"); } } @@ -124,7 +125,7 @@ return enabled || graalRuntime().getConfig().cAssertions; } - public static final Descriptor EXCEPTION_HANDLER_FOR_PC = StubUtil.descriptorFor(ExceptionHandlerStub.class, "exceptionHandlerForPc", false); + public static final Descriptor EXCEPTION_HANDLER_FOR_PC = descriptorFor(ExceptionHandlerStub.class, "exceptionHandlerForPc", false); @NodeIntrinsic(value = CRuntimeCall.class, setStampFromReturnType = true) public static native Word exceptionHandlerForPc(@ConstantNodeParameter Descriptor exceptionHandlerForPc, Word thread); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/LogObjectStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/LogObjectStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/LogObjectStub.java Tue May 14 10:18:31 2013 +0200 @@ -23,6 +23,7 @@ package com.oracle.graal.hotspot.stubs; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -49,7 +50,7 @@ logObjectC(LOG_OBJECT_C, thread(), object, flags); } - public static final Descriptor LOG_OBJECT_C = StubUtil.descriptorFor(LogObjectStub.class, "logObjectC", false); + public static final Descriptor LOG_OBJECT_C = descriptorFor(LogObjectStub.class, "logObjectC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void logObjectC(@ConstantNodeParameter Descriptor logObjectC, Word thread, Object object, int flags); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/LogPrimitiveStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/LogPrimitiveStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/LogPrimitiveStub.java Tue May 14 10:18:31 2013 +0200 @@ -23,6 +23,7 @@ package com.oracle.graal.hotspot.stubs; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -49,7 +50,7 @@ logPrimitivefC(LOG_PRIMITIVE_C, thread(), typeChar, value, newline); } - public static final Descriptor LOG_PRIMITIVE_C = StubUtil.descriptorFor(LogPrimitiveStub.class, "logPrimitivefC", false); + public static final Descriptor LOG_PRIMITIVE_C = descriptorFor(LogPrimitiveStub.class, "logPrimitivefC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void logPrimitivefC(@ConstantNodeParameter Descriptor logPrimitivefC, Word thread, char typeChar, long value, boolean newline); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/LogPrintfStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/LogPrintfStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/LogPrintfStub.java Tue May 14 10:18:31 2013 +0200 @@ -23,6 
+23,7 @@ package com.oracle.graal.hotspot.stubs; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -49,7 +50,7 @@ logPrintfC(LOG_PRINTF_C, thread(), format, v1, v2, v3); } - public static final Descriptor LOG_PRINTF_C = StubUtil.descriptorFor(LogPrintfStub.class, "logPrintfC", false); + public static final Descriptor LOG_PRINTF_C = descriptorFor(LogPrintfStub.class, "logPrintfC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void logPrintfC(@ConstantNodeParameter Descriptor logPrintfC, Word thread, String format, long v1, long v2, long v3); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/MonitorEnterStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/MonitorEnterStub.java Tue May 14 10:17:06 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.hotspot.stubs; - -import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; - -import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; -import com.oracle.graal.api.code.*; -import com.oracle.graal.graph.Node.ConstantNodeParameter; -import com.oracle.graal.graph.Node.NodeIntrinsic; -import com.oracle.graal.hotspot.*; -import com.oracle.graal.hotspot.meta.*; -import com.oracle.graal.hotspot.nodes.*; -import com.oracle.graal.nodes.spi.*; -import com.oracle.graal.replacements.*; -import com.oracle.graal.word.*; - -/** - * Stub called from {@link MonitorEnterStubCall}. 
- */ -public class MonitorEnterStub extends CRuntimeStub { - - public MonitorEnterStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { - super(runtime, replacements, target, linkage); - } - - @Snippet - private static void monitorenter(Object object, Word lock) { - monitorenterC(MONITORENTER_C, thread(), object, lock); - StubUtil.handlePendingException(false); - } - - public static final Descriptor MONITORENTER_C = StubUtil.descriptorFor(MonitorEnterStub.class, "monitorenterC", false); - - @NodeIntrinsic(CRuntimeCall.class) - public static native void monitorenterC(@ConstantNodeParameter Descriptor monitorenterC, Word thread, Object object, Word lock); -} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/MonitorExitStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/MonitorExitStub.java Tue May 14 10:17:06 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.hotspot.stubs; - -import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; - -import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; -import com.oracle.graal.api.code.*; -import com.oracle.graal.graph.Node.ConstantNodeParameter; -import com.oracle.graal.graph.Node.NodeIntrinsic; -import com.oracle.graal.hotspot.*; -import com.oracle.graal.hotspot.meta.*; -import com.oracle.graal.hotspot.nodes.*; -import com.oracle.graal.nodes.spi.*; -import com.oracle.graal.replacements.*; -import com.oracle.graal.word.*; - -/** - * Stub called from {@link MonitorExitStubCall}. 
- */ -public class MonitorExitStub extends CRuntimeStub { - - public MonitorExitStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { - super(runtime, replacements, target, linkage); - } - - @Snippet - private static void monitorexit(Object object, Word lock) { - monitorexitC(MONITOREXIT_C, thread(), object, lock); - } - - public static final Descriptor MONITOREXIT_C = StubUtil.descriptorFor(MonitorExitStub.class, "monitorexitC", false); - - @NodeIntrinsic(CRuntimeCall.class) - public static native void monitorexitC(@ConstantNodeParameter Descriptor monitorexitC, Word thread, Object object, Word lock); -} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewArrayStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewArrayStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewArrayStub.java Tue May 14 10:18:31 2013 +0200 @@ -22,24 +22,25 @@ */ package com.oracle.graal.hotspot.stubs; -import static com.oracle.graal.api.code.DeoptimizationAction.*; -import static com.oracle.graal.api.meta.DeoptimizationReason.*; import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; import static com.oracle.graal.hotspot.replacements.NewObjectSnippets.*; import static com.oracle.graal.hotspot.stubs.NewInstanceStub.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; -import com.oracle.graal.graph.Node.*; +import com.oracle.graal.graph.Node.ConstantNodeParameter; +import com.oracle.graal.graph.Node.NodeIntrinsic; import com.oracle.graal.hotspot.*; import com.oracle.graal.hotspot.meta.*; import com.oracle.graal.hotspot.nodes.*; import com.oracle.graal.hotspot.replacements.*; import com.oracle.graal.nodes.spi.*; import com.oracle.graal.replacements.*; -import com.oracle.graal.replacements.Snippet.*; +import com.oracle.graal.replacements.Snippet.ConstantParameter; +import com.oracle.graal.replacements.Snippet.Fold; import com.oracle.graal.replacements.SnippetTemplate.Arguments; import com.oracle.graal.replacements.SnippetTemplate.SnippetInfo; import com.oracle.graal.word.*; @@ -94,10 +95,10 @@ int elementKind = (layoutHelper >> layoutHelperElementTypeShift()) & layoutHelperElementTypeMask(); int sizeInBytes = computeArrayAllocationSize(length, wordSize(), headerSize, log2ElementSize); if (logging()) { - StubUtil.printf("newArray: element kind %d\n", elementKind); - StubUtil.printf("newArray: array length %d\n", length); - StubUtil.printf("newArray: array size %d\n", sizeInBytes); - StubUtil.printf("newArray: hub=%p\n", hub.rawValue()); + printf("newArray: element kind %d\n", elementKind); + printf("newArray: array length %d\n", length); + printf("newArray: array size %d\n", sizeInBytes); + printf("newArray: hub=%p\n", hub.rawValue()); } // check that array length is small enough for fast path. 
@@ -105,29 +106,22 @@ Word memory = refillAllocate(intArrayHub, sizeInBytes, logging()); if (memory.notEqual(0)) { if (logging()) { - StubUtil.printf("newArray: allocated new array at %p\n", memory.rawValue()); + printf("newArray: allocated new array at %p\n", memory.rawValue()); } formatArray(hub, sizeInBytes, length, headerSize, memory, Word.unsigned(arrayPrototypeMarkWord()), true); - return StubUtil.verifyObject(memory.toObject()); + return verifyObject(memory.toObject()); } } if (logging()) { - StubUtil.printf("newArray: calling new_array_c\n"); + printf("newArray: calling new_array_c\n"); } newArrayC(NEW_ARRAY_C, thread(), hub, length); - - if (clearPendingException(thread())) { - if (logging()) { - StubUtil.printf("newArray: deoptimizing to caller\n"); - } - getAndClearObjectResult(thread()); - DeoptimizeCallerNode.deopt(InvalidateReprofile, RuntimeConstraint); - } - return StubUtil.verifyObject(getAndClearObjectResult(thread())); + handlePendingException(true); + return verifyObject(getAndClearObjectResult(thread())); } - public static final Descriptor NEW_ARRAY_C = StubUtil.descriptorFor(NewArrayStub.class, "newArrayC", false); + public static final Descriptor NEW_ARRAY_C = descriptorFor(NewArrayStub.class, "newArrayC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void newArrayC(@ConstantNodeParameter Descriptor newArrayC, Word thread, Word hub, int length); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java Tue May 14 10:18:31 2013 +0200 @@ -22,12 +22,11 @@ */ package com.oracle.graal.hotspot.stubs; -import static com.oracle.graal.api.code.DeoptimizationAction.*; -import static com.oracle.graal.api.meta.DeoptimizationReason.*; import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*; import static com.oracle.graal.hotspot.nodes.DirectCompareAndSwapNode.*; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; import static com.oracle.graal.hotspot.replacements.NewObjectSnippets.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -98,25 +97,18 @@ for (int offset = 2 * wordSize(); offset < sizeInBytes; offset += wordSize()) { memory.writeWord(offset, Word.zero(), ANY_LOCATION); } - return StubUtil.verifyObject(memory.toObject()); + return verifyObject(memory.toObject()); } } } if (logging()) { - StubUtil.printf("newInstance: calling new_instance_c\n"); + printf("newInstance: calling new_instance_c\n"); } newInstanceC(NEW_INSTANCE_C, thread(), hub); - - if (clearPendingException(thread())) { - if (logging()) { - StubUtil.printf("newInstance: deoptimizing to caller\n"); - } - getAndClearObjectResult(thread()); - DeoptimizeCallerNode.deopt(InvalidateReprofile, RuntimeConstraint); - } - return StubUtil.verifyObject(getAndClearObjectResult(thread())); + handlePendingException(true); + return verifyObject(getAndClearObjectResult(thread())); } /** @@ -143,10 +135,10 @@ Word tlabFreeSpaceInBytes = end.subtract(top); if (log) { - StubUtil.printf("refillTLAB: thread=%p\n", thread.rawValue()); - StubUtil.printf("refillTLAB: top=%p\n", top.rawValue()); - StubUtil.printf("refillTLAB: end=%p\n", end.rawValue()); - StubUtil.printf("refillTLAB: 
tlabFreeSpaceInBytes=%d\n", tlabFreeSpaceInBytes.rawValue()); + printf("refillTLAB: thread=%p\n", thread.rawValue()); + printf("refillTLAB: top=%p\n", top.rawValue()); + printf("refillTLAB: end=%p\n", end.rawValue()); + printf("refillTLAB: tlabFreeSpaceInBytes=%d\n", tlabFreeSpaceInBytes.rawValue()); } Word tlabFreeSpaceInWords = tlabFreeSpaceInBytes.unsignedShiftRight(log2WordSize()); @@ -159,12 +151,12 @@ // increment number of refills thread.writeInt(tlabNumberOfRefillsOffset(), thread.readInt(tlabNumberOfRefillsOffset(), TLAB_NOF_REFILLS_LOCATION) + 1, TLAB_NOF_REFILLS_LOCATION); if (log) { - StubUtil.printf("thread: %p -- number_of_refills %d\n", thread.rawValue(), thread.readInt(tlabNumberOfRefillsOffset(), TLAB_NOF_REFILLS_LOCATION)); + printf("thread: %p -- number_of_refills %d\n", thread.rawValue(), thread.readInt(tlabNumberOfRefillsOffset(), TLAB_NOF_REFILLS_LOCATION)); } // accumulate wastage Word wastage = thread.readWord(tlabFastRefillWasteOffset(), TLAB_FAST_REFILL_WASTE_LOCATION).add(tlabFreeSpaceInWords); if (log) { - StubUtil.printf("thread: %p -- accumulated wastage %d\n", thread.rawValue(), wastage.rawValue()); + printf("thread: %p -- accumulated wastage %d\n", thread.rawValue(), wastage.rawValue()); } thread.writeWord(tlabFastRefillWasteOffset(), wastage, TLAB_FAST_REFILL_WASTE_LOCATION); } @@ -202,7 +194,7 @@ Word newRefillWasteLimit = refillWasteLimit.add(tlabRefillWasteIncrement()); thread.writeWord(tlabRefillWasteLimitOffset(), newRefillWasteLimit, TLAB_REFILL_WASTE_LIMIT_LOCATION); if (log) { - StubUtil.printf("refillTLAB: retaining TLAB - newRefillWasteLimit=%p\n", newRefillWasteLimit.rawValue()); + printf("refillTLAB: retaining TLAB - newRefillWasteLimit=%p\n", newRefillWasteLimit.rawValue()); } if (tlabStats()) { @@ -247,7 +239,7 @@ return Boolean.getBoolean("graal.newInstanceStub.forceSlowPath"); } - public static final Descriptor NEW_INSTANCE_C = StubUtil.descriptorFor(NewInstanceStub.class, "newInstanceC", false); + public static final Descriptor NEW_INSTANCE_C = descriptorFor(NewInstanceStub.class, "newInstanceC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void newInstanceC(@ConstantNodeParameter Descriptor newInstanceC, Word thread, Word hub); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewMultiArrayStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewMultiArrayStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewMultiArrayStub.java Tue May 14 10:18:31 2013 +0200 @@ -23,6 +23,7 @@ package com.oracle.graal.hotspot.stubs; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -47,11 +48,11 @@ @Snippet private static Object newMultiArray(Word hub, int rank, Word dims) { newMultiArrayC(NEW_MULTI_ARRAY_C, thread(), hub, rank, dims); - StubUtil.handlePendingException(true); - return getAndClearObjectResult(thread()); + handlePendingException(true); + return verifyObject(getAndClearObjectResult(thread())); } - public static final Descriptor NEW_MULTI_ARRAY_C = StubUtil.descriptorFor(NewMultiArrayStub.class, "newMultiArrayC", false); + public static final Descriptor NEW_MULTI_ARRAY_C = descriptorFor(NewMultiArrayStub.class, "newMultiArrayC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void 
newMultiArrayC(@ConstantNodeParameter Descriptor newArrayC, Word thread, Word hub, int rank, Word dims); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/OSRMigrationEndStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/OSRMigrationEndStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/OSRMigrationEndStub.java Tue May 14 10:18:31 2013 +0200 @@ -22,6 +22,8 @@ */ package com.oracle.graal.hotspot.stubs; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; + import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; import com.oracle.graal.graph.Node.ConstantNodeParameter; @@ -48,7 +50,7 @@ osrMigrationEndC(OSR_MIGRATION_END_C, buffer); } - public static final Descriptor OSR_MIGRATION_END_C = StubUtil.descriptorFor(OSRMigrationEndStub.class, "osrMigrationEndC", false); + public static final Descriptor OSR_MIGRATION_END_C = descriptorFor(OSRMigrationEndStub.class, "osrMigrationEndC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void osrMigrationEndC(@ConstantNodeParameter Descriptor osrMigrationEndC, Word buffer); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/RegisterFinalizerStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/RegisterFinalizerStub.java Tue May 14 10:17:06 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.hotspot.stubs; - -import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; - -import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; -import com.oracle.graal.api.code.*; -import com.oracle.graal.graph.Node.ConstantNodeParameter; -import com.oracle.graal.graph.Node.NodeIntrinsic; -import com.oracle.graal.hotspot.*; -import com.oracle.graal.hotspot.meta.*; -import com.oracle.graal.hotspot.nodes.*; -import com.oracle.graal.nodes.java.*; -import com.oracle.graal.nodes.spi.*; -import com.oracle.graal.replacements.*; -import com.oracle.graal.word.*; - -/** - * Stub called from {@link RegisterFinalizerNode}. 
- */ -public class RegisterFinalizerStub extends CRuntimeStub { - - public RegisterFinalizerStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { - super(runtime, replacements, target, linkage); - } - - @Snippet - private static void registerFinalizer(Object object) { - registerFinalizerC(REGISTER_FINALIZER_C, thread(), object); - StubUtil.handlePendingException(false); - } - - public static final Descriptor REGISTER_FINALIZER_C = StubUtil.descriptorFor(RegisterFinalizerStub.class, "registerFinalizerC", false); - - @NodeIntrinsic(CRuntimeCall.class) - public static native void registerFinalizerC(@ConstantNodeParameter Descriptor registerFinalizerC, Word thread, Object object); -} diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/RuntimeCallStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/RuntimeCallStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/RuntimeCallStub.java Tue May 14 10:18:31 2013 +0200 @@ -25,7 +25,7 @@ import static com.oracle.graal.api.meta.MetaUtil.*; import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*; -import java.util.*; +import java.lang.reflect.*; import com.oracle.graal.api.code.*; import com.oracle.graal.api.code.CallingConvention.Type; @@ -37,6 +37,7 @@ import com.oracle.graal.hotspot.bridge.*; import com.oracle.graal.hotspot.meta.*; import com.oracle.graal.hotspot.nodes.*; +import com.oracle.graal.hotspot.replacements.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.java.*; import com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind; @@ -49,7 +50,7 @@ import com.oracle.graal.word.phases.*; /** - * Base class for a stub that calls into a HotSpot C/C++ runtime function using the native + * A stub that calls into a HotSpot C/C++ runtime function using the native * {@link CallingConvention}. 
*/ public class RuntimeCallStub extends Stub { @@ -97,7 +98,11 @@ Class[] argumentTypes = d.getArgumentTypes(); JavaType[] parameterTypes = new JavaType[argumentTypes.length]; for (int i = 0; i < parameterTypes.length; ++i) { - parameterTypes[i] = runtime.lookupJavaType(argumentTypes[i]); + if (WordBase.class.isAssignableFrom(argumentTypes[i])) { + parameterTypes[i] = runtime.lookupJavaType(wordKind().toJavaClass()); + } else { + parameterTypes[i] = runtime.lookupJavaType(argumentTypes[i]); + } } TargetDescription target = graalRuntime().getTarget(); JavaType returnType = runtime.lookupJavaType(d.getResultType()); @@ -149,96 +154,121 @@ }; } + static class GraphBuilder { + + public GraphBuilder(Stub stub) { + this.graph = new StructuredGraph(stub.toString(), null); + graph.replaceFixed(graph.start(), graph.add(new StubStartNode(stub))); + this.lastFixedNode = graph.start(); + } + + final StructuredGraph graph; + private FixedWithNextNode lastFixedNode; + + <T extends Node> T add(T node) { + T result = graph.add(node); + assert node == result; + if (result instanceof FixedNode) { + assert lastFixedNode != null; + FixedNode fixed = (FixedNode) result; + assert fixed.predecessor() == null; + graph.addAfterFixed(lastFixedNode, fixed); + if (fixed instanceof FixedWithNextNode) { + lastFixedNode = (FixedWithNextNode) fixed; + } else { + lastFixedNode = null; + } + } + return result; + } + } + @Override protected StructuredGraph getGraph() { Class[] args = linkage.getDescriptor().getArgumentTypes(); - LocalNode[] locals = new LocalNode[args.length]; + boolean isObjectResult = linkage.getCallingConvention().getReturn().getKind() == Kind.Object; + + GraphBuilder builder = new GraphBuilder(this); + + LocalNode[] locals = createLocals(builder, args); + + ReadRegisterNode thread = prependThread || isObjectResult ? builder.add(new ReadRegisterNode(runtime.threadRegister(), true, false)) : null; + ValueNode result = createTargetCall(builder, locals, thread); + createInvoke(builder, StubUtil.class, "handlePendingException", ConstantNode.forBoolean(isObjectResult, builder.graph)); + if (isObjectResult) { + InvokeNode object = createInvoke(builder, HotSpotReplacementsUtil.class, "getAndClearObjectResult", thread); + result = createInvoke(builder, StubUtil.class, "verifyObject", object); + } + builder.add(new ReturnNode(linkage.descriptor.getResultType() == void.class ? 
null : result)); - StructuredGraph graph = new StructuredGraph(toString(), null); - StubStartNode start = graph.add(new StubStartNode(this)); - graph.replaceFixed(graph.start(), start); + if (Debug.isDumpEnabled()) { + Debug.dump(builder.graph, "Initial stub graph"); + } + + for (InvokeNode invoke : builder.graph.getNodes(InvokeNode.class).snapshot()) { + inline(invoke); + } + assert builder.graph.getNodes(InvokeNode.class).isEmpty(); + if (Debug.isDumpEnabled()) { + Debug.dump(builder.graph, "Stub graph before compilation"); + } + + return builder.graph; + } + + private LocalNode[] createLocals(GraphBuilder builder, Class[] args) { + LocalNode[] locals = new LocalNode[args.length]; ResolvedJavaType accessingClass = runtime.lookupJavaType(getClass()); for (int i = 0; i < args.length; i++) { - JavaType type = runtime.lookupJavaType(args[i]).resolve(accessingClass); + ResolvedJavaType type = runtime.lookupJavaType(args[i]).resolve(accessingClass); Kind kind = type.getKind().getStackKind(); Stamp stamp; if (kind == Kind.Object) { - stamp = StampFactory.declared((ResolvedJavaType) type); + stamp = StampFactory.declared(type); } else { stamp = StampFactory.forKind(kind); } - LocalNode local = graph.unique(new LocalNode(i, stamp)); + LocalNode local = builder.add(new LocalNode(i, stamp)); locals[i] = local; } - - // Create target call - CRuntimeCall call = createTargetCall(locals, graph, start); - - // Create call to handlePendingException - ResolvedJavaMethod hpeMethod = resolveMethod(StubUtil.class, "handlePendingException", boolean.class); - JavaType returnType = hpeMethod.getSignature().getReturnType(null); - ValueNode[] hpeArgs = {ConstantNode.forBoolean(linkage.getCallingConvention().getReturn().getKind() == Kind.Object, graph)}; - MethodCallTargetNode hpeTarget = graph.add(new MethodCallTargetNode(InvokeKind.Static, hpeMethod, hpeArgs, returnType)); - InvokeNode hpeInvoke = graph.add(new InvokeNode(hpeTarget, FrameState.UNKNOWN_BCI)); - List<ValueNode> emptyStack = Collections.emptyList(); - hpeInvoke.setStateAfter(graph.add(new FrameState(null, FrameState.INVALID_FRAMESTATE_BCI, new ValueNode[0], emptyStack, new ValueNode[0], false, false))); - graph.addAfterFixed(call, hpeInvoke); - - // Create return node - ReturnNode ret = graph.add(new ReturnNode(linkage.descriptor.getResultType() == void.class ? null : call)); - graph.addAfterFixed(hpeInvoke, ret); - if (Debug.isDumpEnabled()) { - Debug.dump(graph, "Initial stub graph"); - } - - // Inline call to handlePendingException - inline(hpeInvoke); - if (Debug.isDumpEnabled()) { - Debug.dump(graph, "Stub graph before compilation"); - } - - return graph; + return locals; } - private CRuntimeCall createTargetCall(LocalNode[] locals, StructuredGraph graph, StubStartNode start) { - CRuntimeCall call; - ValueNode[] targetArguments; + private InvokeNode createInvoke(GraphBuilder builder, Class declaringClass, String name, ValueNode... 
hpeArgs) { + ResolvedJavaMethod method = null; + for (Method m : declaringClass.getDeclaredMethods()) { + if (Modifier.isStatic(m.getModifiers()) && m.getName().equals(name)) { + assert method == null : "found more than one method in " + declaringClass + " named " + name; + method = runtime.lookupJavaMethod(m); + } + } + assert method != null : "did not find method in " + declaringClass + " named " + name; + JavaType returnType = method.getSignature().getReturnType(null); + MethodCallTargetNode callTarget = builder.add(new MethodCallTargetNode(InvokeKind.Static, method, hpeArgs, returnType)); + InvokeNode invoke = builder.add(new InvokeNode(callTarget, FrameState.UNKNOWN_BCI)); + return invoke; + } + + private CRuntimeCall createTargetCall(GraphBuilder builder, LocalNode[] locals, ReadRegisterNode thread) { if (prependThread) { - ReadRegisterNode thread = graph.add(new ReadRegisterNode(runtime.threadRegister(), true, false)); - graph.addAfterFixed(start, thread); - targetArguments = new ValueNode[1 + locals.length]; + ValueNode[] targetArguments = new ValueNode[1 + locals.length]; targetArguments[0] = thread; System.arraycopy(locals, 0, targetArguments, 1, locals.length); - call = graph.add(new CRuntimeCall(target.descriptor, targetArguments)); - graph.addAfterFixed(thread, call); + return builder.add(new CRuntimeCall(target.descriptor, targetArguments)); } else { - targetArguments = new ValueNode[locals.length]; - System.arraycopy(locals, 0, targetArguments, 0, locals.length); - call = graph.add(new CRuntimeCall(target.descriptor, targetArguments)); - graph.addAfterFixed(start, call); + return builder.add(new CRuntimeCall(target.descriptor, locals)); } - return call; } private void inline(InvokeNode invoke) { StructuredGraph graph = invoke.graph(); ResolvedJavaMethod method = ((MethodCallTargetNode) invoke.callTarget()).targetMethod(); ReplacementsImpl repl = new ReplacementsImpl(runtime, new Assumptions(false), runtime.getTarget()); - StructuredGraph hpeGraph = repl.makeGraph(method, null, null); - InliningUtil.inline(invoke, hpeGraph, false); + StructuredGraph calleeGraph = repl.makeGraph(method, null, null); + InliningUtil.inline(invoke, calleeGraph, false); new NodeIntrinsificationPhase(runtime).apply(graph); new WordTypeRewriterPhase(runtime, wordKind()).apply(graph); new DeadCodeEliminationPhase().apply(graph); } - - private ResolvedJavaMethod resolveMethod(Class declaringClass, String name, Class... 
parameterTypes) { - try { - return runtime.lookupJavaMethod(declaringClass.getDeclaredMethod(name, parameterTypes)); - } catch (Exception e) { - throw new GraalInternalError(e); - } - } } diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/Stub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/Stub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/Stub.java Tue May 14 10:18:31 2013 +0200 @@ -164,8 +164,8 @@ @Override public InstalledCode call() { Stub stub = Stub.this; - HotSpotInstalledCode installedCode = new HotSpotInstalledCode(stub); - HotSpotCompilationResult hsCompResult = new HotSpotCompilationResult(stub, compResult); + HotSpotRuntimeStub installedCode = new HotSpotRuntimeStub(stub); + HotSpotCompiledCode hsCompResult = new HotSpotCompiledRuntimeStub(stub, compResult); CodeInstallResult result = graalRuntime().getCompilerToVM().installCode(hsCompResult, installedCode, null); if (result != CodeInstallResult.OK) { throw new GraalInternalError("Error installing stub %s: %s", Stub.this, result); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ThreadIsInterruptedStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ThreadIsInterruptedStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ThreadIsInterruptedStub.java Tue May 14 10:18:31 2013 +0200 @@ -23,6 +23,7 @@ package com.oracle.graal.hotspot.stubs; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -47,11 +48,11 @@ @Snippet private static boolean threadIsInterrupted(Thread receiverThread, boolean clearIsInterrupted) { boolean result = threadIsInterruptedC(THREAD_IS_INTERRUPTED_C, thread(), receiverThread, clearIsInterrupted); - StubUtil.handlePendingException(false); + handlePendingException(false); return result; } - public static final Descriptor THREAD_IS_INTERRUPTED_C = StubUtil.descriptorFor(ThreadIsInterruptedStub.class, "threadIsInterruptedC", false); + public static final Descriptor THREAD_IS_INTERRUPTED_C = descriptorFor(ThreadIsInterruptedStub.class, "threadIsInterruptedC", false); @NodeIntrinsic(CRuntimeCall.class) public static native boolean threadIsInterruptedC(@ConstantNodeParameter Descriptor threadIsInterruptedC, Word thread, Thread receiverThread, boolean clearIsInterrupted); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UnwindExceptionToCallerStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UnwindExceptionToCallerStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UnwindExceptionToCallerStub.java Tue May 14 10:18:31 2013 +0200 @@ -26,6 +26,7 @@ import static com.oracle.graal.hotspot.nodes.JumpToExceptionHandlerInCallerNode.*; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; import static com.oracle.graal.hotspot.stubs.ExceptionHandlerStub.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -63,11 +64,11 @@ private static void unwindExceptionToCaller(Object exception, 
Word returnAddress) { Pointer exceptionOop = Word.fromObject(exception); if (logging()) { - StubUtil.printf("unwinding exception %p (", exceptionOop.rawValue()); - StubUtil.decipher(exceptionOop.rawValue()); - StubUtil.printf(") at %p (", exceptionOop.rawValue(), returnAddress.rawValue()); - StubUtil.decipher(returnAddress.rawValue()); - StubUtil.printf(")\n"); + printf("unwinding exception %p (", exceptionOop.rawValue()); + decipher(exceptionOop.rawValue()); + printf(") at %p (", exceptionOop.rawValue(), returnAddress.rawValue()); + decipher(returnAddress.rawValue()); + printf(")\n"); } checkNoExceptionInThread(assertionsEnabled()); checkExceptionNotNull(assertionsEnabled(), exception); @@ -75,9 +76,9 @@ Word handlerInCallerPc = exceptionHandlerForReturnAddress(EXCEPTION_HANDLER_FOR_RETURN_ADDRESS, thread(), returnAddress); if (logging()) { - StubUtil.printf("handler for exception %p at return address %p is at %p (", exceptionOop.rawValue(), returnAddress.rawValue(), handlerInCallerPc.rawValue()); - StubUtil.decipher(handlerInCallerPc.rawValue()); - StubUtil.printf(")\n"); + printf("handler for exception %p at return address %p is at %p (", exceptionOop.rawValue(), returnAddress.rawValue(), handlerInCallerPc.rawValue()); + decipher(handlerInCallerPc.rawValue()); + printf(")\n"); } jumpToExceptionHandlerInCaller(handlerInCallerPc, exception, returnAddress); @@ -96,7 +97,7 @@ return enabled || graalRuntime().getConfig().cAssertions; } - public static final Descriptor EXCEPTION_HANDLER_FOR_RETURN_ADDRESS = StubUtil.descriptorFor(UnwindExceptionToCallerStub.class, "exceptionHandlerForReturnAddress", false); + public static final Descriptor EXCEPTION_HANDLER_FOR_RETURN_ADDRESS = descriptorFor(UnwindExceptionToCallerStub.class, "exceptionHandlerForReturnAddress", false); @NodeIntrinsic(value = CRuntimeCall.class, setStampFromReturnType = true) public static native Word exceptionHandlerForReturnAddress(@ConstantNodeParameter Descriptor exceptionHandlerForReturnAddress, Word thread, Word returnAddress); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/VMErrorStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/VMErrorStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/VMErrorStub.java Tue May 14 10:18:31 2013 +0200 @@ -23,6 +23,7 @@ package com.oracle.graal.hotspot.stubs; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -49,7 +50,7 @@ vmErrorC(VM_ERROR_C, thread(), where, format, value); } - public static final Descriptor VM_ERROR_C = StubUtil.descriptorFor(VMErrorStub.class, "vmErrorC", false); + public static final Descriptor VM_ERROR_C = descriptorFor(VMErrorStub.class, "vmErrorC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void vmErrorC(@ConstantNodeParameter Descriptor vmErrorC, Word thread, String where, String format, long value); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/VerifyOopStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/VerifyOopStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/VerifyOopStub.java Tue May 14 10:18:31 2013 +0200 @@ -22,6 +22,8 @@ */ package 
com.oracle.graal.hotspot.stubs; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; + import com.oracle.graal.api.code.*; import com.oracle.graal.hotspot.*; import com.oracle.graal.hotspot.meta.*; @@ -40,6 +42,6 @@ @Snippet private static Object verifyOop(Object object) { - return StubUtil.verifyObject(object); + return verifyObject(object); } } diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/WriteBarrierPostStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/WriteBarrierPostStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/WriteBarrierPostStub.java Tue May 14 10:18:31 2013 +0200 @@ -23,6 +23,7 @@ package com.oracle.graal.hotspot.stubs; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -49,7 +50,7 @@ writeBarrierPostC(WRITE_BARRIER_POST_C, thread(), object, card); } - public static final Descriptor WRITE_BARRIER_POST_C = StubUtil.descriptorFor(WriteBarrierPostStub.class, "writeBarrierPostC", false); + public static final Descriptor WRITE_BARRIER_POST_C = descriptorFor(WriteBarrierPostStub.class, "writeBarrierPostC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void writeBarrierPostC(@ConstantNodeParameter Descriptor vmErrorC, Word thread, Object object, Word card); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/WriteBarrierPreStub.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/WriteBarrierPreStub.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/WriteBarrierPreStub.java Tue May 14 10:18:31 2013 +0200 @@ -23,6 +23,7 @@ package com.oracle.graal.hotspot.stubs; import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*; +import static com.oracle.graal.hotspot.stubs.StubUtil.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.code.*; @@ -49,7 +50,7 @@ writeBarrierPreC(WRITE_BARRIER_PRE_C, thread(), object); } - public static final Descriptor WRITE_BARRIER_PRE_C = StubUtil.descriptorFor(WriteBarrierPreStub.class, "writeBarrierPreC", false); + public static final Descriptor WRITE_BARRIER_PRE_C = descriptorFor(WriteBarrierPreStub.class, "writeBarrierPreC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void writeBarrierPreC(@ConstantNodeParameter Descriptor vmErrorC, Word thread, Object object); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java Tue May 14 10:18:31 2013 +0200 @@ -26,6 +26,7 @@ import static com.oracle.graal.api.code.TypeCheckHints.*; import static com.oracle.graal.api.meta.DeoptimizationReason.*; import static com.oracle.graal.bytecode.Bytecodes.*; +import static com.oracle.graal.java.GraphBuilderPhase.RuntimeCalls.*; import static java.lang.reflect.Modifier.*; import java.lang.reflect.*; @@ -940,7 +941,7 @@ ValueNode exception = ConstantNode.forObject(cachedNullPointerException, runtime, currentGraph); trueSucc.setNext(handleException(exception, 
bci())); } else { - RuntimeCallNode call = currentGraph.add(new RuntimeCallNode(RuntimeCalls.CREATE_NULL_POINTER_EXCEPTION)); + RuntimeCallNode call = currentGraph.add(new RuntimeCallNode(CREATE_NULL_POINTER_EXCEPTION)); call.setStateAfter(frameState.create(bci())); trueSucc.setNext(call); call.setNext(handleException(call, bci())); @@ -966,7 +967,7 @@ ValueNode exception = ConstantNode.forObject(cachedArrayIndexOutOfBoundsException, runtime, currentGraph); falseSucc.setNext(handleException(exception, bci())); } else { - RuntimeCallNode call = currentGraph.add(new RuntimeCallNode(RuntimeCalls.CREATE_OUT_OF_BOUNDS_EXCEPTION, index)); + RuntimeCallNode call = currentGraph.add(new RuntimeCallNode(CREATE_OUT_OF_BOUNDS_EXCEPTION, index)); call.setStateAfter(frameState.create(bci())); falseSucc.setNext(call); call.setNext(handleException(call, bci())); diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningUtil.java --- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningUtil.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningUtil.java Tue May 14 10:18:31 2013 +0200 @@ -1265,7 +1265,7 @@ StructuredGraph graph = invoke.asNode().graph(); FrameState stateAfter = invoke.stateAfter(); - assert stateAfter.isAlive(); + assert stateAfter == null || stateAfter.isAlive(); IdentityHashMap replacements = new IdentityHashMap<>(); ArrayList nodes = new ArrayList<>(); @@ -1338,39 +1338,40 @@ } } - FrameState outerFrameState = null; - int callerLockDepth = stateAfter.nestedLockDepth(); - for (Node node : duplicates.values()) { - if (node instanceof FrameState) { - FrameState frameState = (FrameState) node; - assert frameState.bci != FrameState.BEFORE_BCI : frameState; - if (frameState.bci == FrameState.AFTER_BCI) { - frameState.replaceAndDelete(stateAfter); - } else if (frameState.bci == FrameState.AFTER_EXCEPTION_BCI) { - if (frameState.isAlive()) { - assert stateAtExceptionEdge != null; - frameState.replaceAndDelete(stateAtExceptionEdge); + if (stateAfter != null) { + FrameState outerFrameState = null; + int callerLockDepth = stateAfter.nestedLockDepth(); + for (Node node : duplicates.values()) { + if (node instanceof FrameState) { + FrameState frameState = (FrameState) node; + assert frameState.bci != FrameState.BEFORE_BCI : frameState; + if (frameState.bci == FrameState.AFTER_BCI) { + frameState.replaceAndDelete(stateAfter); + } else if (frameState.bci == FrameState.AFTER_EXCEPTION_BCI) { + if (frameState.isAlive()) { + assert stateAtExceptionEdge != null; + frameState.replaceAndDelete(stateAtExceptionEdge); + } else { + assert stateAtExceptionEdge == null; + } } else { - assert stateAtExceptionEdge == null; - } - } else { - // only handle the outermost frame states - if (frameState.outerFrameState() == null) { - assert frameState.bci == FrameState.INVALID_FRAMESTATE_BCI || frameState.method() == inlineGraph.method(); - if (outerFrameState == null) { - outerFrameState = stateAfter.duplicateModified(invoke.bci(), stateAfter.rethrowException(), invoke.asNode().kind()); - outerFrameState.setDuringCall(true); + // only handle the outermost frame states + if (frameState.outerFrameState() == null) { + assert frameState.bci == FrameState.INVALID_FRAMESTATE_BCI || frameState.method() == inlineGraph.method(); + if (outerFrameState == null) { + outerFrameState = stateAfter.duplicateModified(invoke.bci(), stateAfter.rethrowException(), 
invoke.asNode().kind()); + outerFrameState.setDuringCall(true); + } + frameState.setOuterFrameState(outerFrameState); } - frameState.setOuterFrameState(outerFrameState); } } - } - if (callerLockDepth != 0 && node instanceof MonitorReference) { - MonitorReference monitor = (MonitorReference) node; - monitor.setLockDepth(monitor.getLockDepth() + callerLockDepth); + if (callerLockDepth != 0 && node instanceof MonitorReference) { + MonitorReference monitor = (MonitorReference) node; + monitor.setLockDepth(monitor.getLockDepth() + callerLockDepth); + } } } - Node returnValue = null; if (returnNode != null) { if (returnNode.result() instanceof LocalNode) { diff -r f44d7e24cebd -r cf0e31151830 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/verify/VerifyValueUsage.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/verify/VerifyValueUsage.java Tue May 14 10:17:06 2013 +0200 +++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/verify/VerifyValueUsage.java Tue May 14 10:18:31 2013 +0200 @@ -51,9 +51,10 @@ @Override protected boolean verify(StructuredGraph graph) { for (ObjectEqualsNode cn : graph.getNodes().filter(ObjectEqualsNode.class)) { - String desc = "VerifyValueUsage: " + cn.x() + " or " + cn.y() + " in " + graph.method() + " uses object identity. Should use equals() instead."; - if (!graph.method().toString().endsWith("equals(Object)>")) { - assert !((checkType(cn.x()) && !(cn.y() instanceof ConstantNode)) || (checkType(cn.y()) && !(cn.x() instanceof ConstantNode))) : desc; + Signature signature = graph.method().getSignature(); + if (!(graph.method().getName().equals("equals") && signature.getParameterCount(false) == 1 && signature.getParameterKind(0).equals(Kind.Object))) { + assert !((checkType(cn.x()) && !(cn.y() instanceof ConstantNode)) || (checkType(cn.y()) && !(cn.x() instanceof ConstantNode))) : "VerifyValueUsage: " + cn.x() + " or " + cn.y() + + " in " + graph.method() + " uses object identity. 
Should use equals() instead."; } } return true; diff -r f44d7e24cebd -r cf0e31151830 make/build-graal.xml --- a/make/build-graal.xml Tue May 14 10:17:06 2013 +0200 +++ b/make/build-graal.xml Tue May 14 10:18:31 2013 +0200 @@ -77,7 +77,7 @@ - + diff -r f44d7e24cebd -r cf0e31151830 mx/commands.py --- a/mx/commands.py Tue May 14 10:17:06 2013 +0200 +++ b/mx/commands.py Tue May 14 10:18:31 2013 +0200 @@ -763,6 +763,12 @@ exe = join(jdk, 'bin', mx.exe_suffix('java')) dbg = _native_dbg.split() if _native_dbg is not None else [] + + if '-version' in args: + ignoredArgs = args[args.index('-version')+1:] + if len(ignoredArgs) > 0: + mx.log("Warning: The following options will be ignored by the vm because they come after the '-version' argument: " + ' '.join(ignoredArgs)) + return mx.run(dbg + [exe, '-' + vm] + args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout) def _find_classes_with_annotations(p, pkgRoot, annotations, includeInnerClasses=False): diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/classfile/systemDictionary.hpp --- a/src/share/vm/classfile/systemDictionary.hpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/classfile/systemDictionary.hpp Tue May 14 10:18:31 2013 +0200 @@ -185,10 +185,13 @@ /* Support for Graal */ \ do_klass(BitSet_klass, java_util_BitSet, Opt) \ /* graal.hotspot */ \ - do_klass(HotSpotCompilationResult_klass, com_oracle_graal_hotspot_HotSpotCompilationResult, Opt) \ + do_klass(HotSpotCompiledCode_klass, com_oracle_graal_hotspot_HotSpotCompiledCode, Opt) \ + do_klass(HotSpotCompiledNmethod_klass, com_oracle_graal_hotspot_HotSpotCompiledNmethod, Opt) \ + do_klass(HotSpotCompiledRuntimeStub_klass, com_oracle_graal_hotspot_HotSpotCompiledRuntimeStub, Opt) \ do_klass(HotSpotRuntimeCallTarget_klass, com_oracle_graal_hotspot_HotSpotRuntimeCallTarget, Opt) \ do_klass(HotSpotCodeInfo_klass, com_oracle_graal_hotspot_meta_HotSpotCodeInfo, Opt) \ do_klass(HotSpotInstalledCode_klass, com_oracle_graal_hotspot_meta_HotSpotInstalledCode, Opt) \ + do_klass(HotSpotNmethod_klass, com_oracle_graal_hotspot_meta_HotSpotNmethod, Opt) \ do_klass(HotSpotJavaType_klass, com_oracle_graal_hotspot_meta_HotSpotJavaType, Opt) \ do_klass(HotSpotMethodData_klass, com_oracle_graal_hotspot_meta_HotSpotMethodData, Opt) \ do_klass(HotSpotResolvedJavaField_klass, com_oracle_graal_hotspot_meta_HotSpotResolvedJavaField, Opt) \ diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/classfile/vmSymbols.hpp Tue May 14 10:18:31 2013 +0200 @@ -296,12 +296,15 @@ template(com_oracle_graal_hotspot_HotSpotGraalRuntime, "com/oracle/graal/hotspot/HotSpotGraalRuntime") \ template(com_oracle_graal_hotspot_HotSpotKlassOop, "com/oracle/graal/hotspot/HotSpotKlassOop") \ template(com_oracle_graal_hotspot_HotSpotOptions, "com/oracle/graal/hotspot/HotSpotOptions") \ - template(com_oracle_graal_hotspot_HotSpotCompilationResult, "com/oracle/graal/hotspot/HotSpotCompilationResult") \ + template(com_oracle_graal_hotspot_HotSpotCompiledCode, "com/oracle/graal/hotspot/HotSpotCompiledCode") \ + template(com_oracle_graal_hotspot_HotSpotCompiledNmethod, "com/oracle/graal/hotspot/HotSpotCompiledNmethod") \ + template(com_oracle_graal_hotspot_HotSpotCompiledRuntimeStub, "com/oracle/graal/hotspot/HotSpotCompiledRuntimeStub") \ template(com_oracle_graal_hotspot_HotSpotRuntimeCallTarget, "com/oracle/graal/hotspot/HotSpotRuntimeCallTarget") \ template(com_oracle_graal_hotspot_bridge_VMToCompiler, 
"com/oracle/graal/hotspot/bridge/VMToCompiler") \ template(com_oracle_graal_hotspot_bridge_CompilerToVMImpl, "com/oracle/graal/hotspot/bridge/CompilerToVMImpl") \ template(com_oracle_graal_hotspot_meta_HotSpotCodeInfo, "com/oracle/graal/hotspot/meta/HotSpotCodeInfo") \ template(com_oracle_graal_hotspot_meta_HotSpotInstalledCode, "com/oracle/graal/hotspot/meta/HotSpotInstalledCode") \ + template(com_oracle_graal_hotspot_meta_HotSpotNmethod, "com/oracle/graal/hotspot/meta/HotSpotNmethod") \ template(com_oracle_graal_hotspot_meta_HotSpotJavaType, "com/oracle/graal/hotspot/meta/HotSpotJavaType") \ template(com_oracle_graal_hotspot_meta_HotSpotMethodData, "com/oracle/graal/hotspot/meta/HotSpotMethodData") \ template(com_oracle_graal_hotspot_meta_HotSpotResolvedJavaField, "com/oracle/graal/hotspot/meta/HotSpotResolvedJavaField") \ diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/code/nmethod.cpp --- a/src/share/vm/code/nmethod.cpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/code/nmethod.cpp Tue May 14 10:18:31 2013 +0200 @@ -1,515 +1,515 @@ -/* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "code/codeCache.hpp" -#include "code/compiledIC.hpp" -#include "code/dependencies.hpp" -#include "code/nmethod.hpp" -#include "code/scopeDesc.hpp" -#include "compiler/abstractCompiler.hpp" -#include "compiler/compileBroker.hpp" -#include "compiler/compileLog.hpp" -#include "compiler/compilerOracle.hpp" -#include "compiler/disassembler.hpp" -#include "interpreter/bytecode.hpp" -#include "oops/methodData.hpp" -#include "prims/jvmtiRedefineClassesTrace.hpp" -#include "prims/jvmtiImpl.hpp" -#include "runtime/sharedRuntime.hpp" -#include "runtime/sweeper.hpp" -#include "utilities/dtrace.hpp" -#include "utilities/events.hpp" -#include "utilities/xmlstream.hpp" -#ifdef SHARK -#include "shark/sharkCompiler.hpp" -#endif -#ifdef GRAAL -#include "graal/graalJavaAccess.hpp" -#endif - -#ifdef DTRACE_ENABLED - -// Only bother with this argument setup if dtrace is available - -#ifndef USDT2 -HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load, - const char*, int, const char*, int, const char*, int, void*, size_t); - -HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload, - char*, int, char*, int, char*, int); - -#define DTRACE_METHOD_UNLOAD_PROBE(method) \ - { \ - Method* m = (method); \ - if (m != NULL) { \ - Symbol* klass_name = m->klass_name(); \ - Symbol* name = m->name(); \ - Symbol* signature = m->signature(); \ - HS_DTRACE_PROBE6(hotspot, compiled__method__unload, \ - klass_name->bytes(), klass_name->utf8_length(), \ - name->bytes(), name->utf8_length(), \ - signature->bytes(), signature->utf8_length()); \ - } \ - } -#else /* USDT2 */ -#define DTRACE_METHOD_UNLOAD_PROBE(method) \ - { \ - Method* m = (method); \ - if (m != NULL) { \ - Symbol* klass_name = m->klass_name(); \ - Symbol* name = m->name(); \ - Symbol* signature = m->signature(); \ - HOTSPOT_COMPILED_METHOD_UNLOAD( \ - (char *) klass_name->bytes(), klass_name->utf8_length(), \ - (char *) name->bytes(), name->utf8_length(), \ - (char *) signature->bytes(), signature->utf8_length()); \ - } \ - } -#endif /* USDT2 */ - -#else // ndef DTRACE_ENABLED - -#define DTRACE_METHOD_UNLOAD_PROBE(method) - -#endif - -bool nmethod::is_compiled_by_c1() const { - if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing - if (is_native_method()) return false; - return compiler()->is_c1(); -} -bool nmethod::is_compiled_by_graal() const { - if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing - if (is_native_method()) return false; - return compiler()->is_graal(); -} -bool nmethod::is_compiled_by_c2() const { - if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing - if (is_native_method()) return false; - return compiler()->is_c2(); -} -bool nmethod::is_compiled_by_shark() const { - if (is_native_method()) return false; - assert(compiler() != NULL, "must be"); - return compiler()->is_shark(); -} - - - -//--------------------------------------------------------------------------------- -// NMethod statistics -// They are printed under various flags, including: -// PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation. -// (In the latter two cases, they like other stats are printed to the log only.) - -// These variables are put into one block to reduce relocations -// and make it simpler to print from the debugger. 
-static -struct nmethod_stats_struct { - int nmethod_count; - int total_size; - int relocation_size; - int consts_size; - int insts_size; - int stub_size; - int scopes_data_size; - int scopes_pcs_size; - int dependencies_size; - int handler_table_size; - int nul_chk_table_size; - int oops_size; - - void note_nmethod(nmethod* nm) { - nmethod_count += 1; - total_size += nm->size(); - relocation_size += nm->relocation_size(); - consts_size += nm->consts_size(); - insts_size += nm->insts_size(); - stub_size += nm->stub_size(); - oops_size += nm->oops_size(); - scopes_data_size += nm->scopes_data_size(); - scopes_pcs_size += nm->scopes_pcs_size(); - dependencies_size += nm->dependencies_size(); - handler_table_size += nm->handler_table_size(); - nul_chk_table_size += nm->nul_chk_table_size(); - } - void print_nmethod_stats() { - if (nmethod_count == 0) return; - tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count); - if (total_size != 0) tty->print_cr(" total in heap = %d", total_size); - if (relocation_size != 0) tty->print_cr(" relocation = %d", relocation_size); - if (consts_size != 0) tty->print_cr(" constants = %d", consts_size); - if (insts_size != 0) tty->print_cr(" main code = %d", insts_size); - if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size); - if (oops_size != 0) tty->print_cr(" oops = %d", oops_size); - if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size); - if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size); - if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size); - if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size); - if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size); - } - - int native_nmethod_count; - int native_total_size; - int native_relocation_size; - int native_insts_size; - int native_oops_size; - void note_native_nmethod(nmethod* nm) { - native_nmethod_count += 1; - native_total_size += nm->size(); - native_relocation_size += nm->relocation_size(); - native_insts_size += nm->insts_size(); - native_oops_size += nm->oops_size(); - } - void print_native_nmethod_stats() { - if (native_nmethod_count == 0) return; - tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count); - if (native_total_size != 0) tty->print_cr(" N. total size = %d", native_total_size); - if (native_relocation_size != 0) tty->print_cr(" N. relocation = %d", native_relocation_size); - if (native_insts_size != 0) tty->print_cr(" N. main code = %d", native_insts_size); - if (native_oops_size != 0) tty->print_cr(" N. 
oops = %d", native_oops_size); - } - - int pc_desc_resets; // number of resets (= number of caches) - int pc_desc_queries; // queries to nmethod::find_pc_desc - int pc_desc_approx; // number of those which have approximate true - int pc_desc_repeats; // number of _pc_descs[0] hits - int pc_desc_hits; // number of LRU cache hits - int pc_desc_tests; // total number of PcDesc examinations - int pc_desc_searches; // total number of quasi-binary search steps - int pc_desc_adds; // number of LUR cache insertions - - void print_pc_stats() { - tty->print_cr("PcDesc Statistics: %d queries, %.2f comparisons per query", - pc_desc_queries, - (double)(pc_desc_tests + pc_desc_searches) - / pc_desc_queries); - tty->print_cr(" caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d", - pc_desc_resets, - pc_desc_queries, pc_desc_approx, - pc_desc_repeats, pc_desc_hits, - pc_desc_tests, pc_desc_searches, pc_desc_adds); - } -} nmethod_stats; - - -//--------------------------------------------------------------------------------- - - -ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) { - assert(pc != NULL, "Must be non null"); - assert(exception.not_null(), "Must be non null"); - assert(handler != NULL, "Must be non null"); - - _count = 0; - _exception_type = exception->klass(); - _next = NULL; - - add_address_and_handler(pc,handler); -} - - -address ExceptionCache::match(Handle exception, address pc) { - assert(pc != NULL,"Must be non null"); - assert(exception.not_null(),"Must be non null"); - if (exception->klass() == exception_type()) { - return (test_address(pc)); - } - - return NULL; -} - - -bool ExceptionCache::match_exception_with_space(Handle exception) { - assert(exception.not_null(),"Must be non null"); - if (exception->klass() == exception_type() && count() < cache_size) { - return true; - } - return false; -} - - -address ExceptionCache::test_address(address addr) { - for (int i=0; imatch_exception_with_space(exception)) { - return ec; - } - ec = ec->next(); - } - return NULL; -} - - -//----------------------------------------------------------------------------- - - -// Helper used by both find_pc_desc methods. -static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) { - NOT_PRODUCT(++nmethod_stats.pc_desc_tests); - if (!approximate) - return pc->pc_offset() == pc_offset; - else - return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset(); -} - -void PcDescCache::reset_to(PcDesc* initial_pc_desc) { - if (initial_pc_desc == NULL) { - _pc_descs[0] = NULL; // native method; no PcDescs at all - return; - } - NOT_PRODUCT(++nmethod_stats.pc_desc_resets); - // reset the cache by filling it with benign (non-null) values - assert(initial_pc_desc->pc_offset() < 0, "must be sentinel"); - for (int i = 0; i < cache_size; i++) - _pc_descs[i] = initial_pc_desc; -} - -PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) { - NOT_PRODUCT(++nmethod_stats.pc_desc_queries); - NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx); - - // Note: one might think that caching the most recently - // read value separately would be a win, but one would be - // wrong. When many threads are updating it, the cache - // line it's in would bounce between caches, negating - // any benefit. - - // In order to prevent race conditions do not load cache elements - // repeatedly, but use a local copy: - PcDesc* res; - - // Step one: Check the most recently added value. 
- res = _pc_descs[0]; - if (res == NULL) return NULL; // native method; no PcDescs at all - if (match_desc(res, pc_offset, approximate)) { - NOT_PRODUCT(++nmethod_stats.pc_desc_repeats); - return res; - } - - // Step two: Check the rest of the LRU cache. - for (int i = 1; i < cache_size; ++i) { - res = _pc_descs[i]; - if (res->pc_offset() < 0) break; // optimization: skip empty cache - if (match_desc(res, pc_offset, approximate)) { - NOT_PRODUCT(++nmethod_stats.pc_desc_hits); - return res; - } - } - - // Report failure. - return NULL; -} - -void PcDescCache::add_pc_desc(PcDesc* pc_desc) { - NOT_PRODUCT(++nmethod_stats.pc_desc_adds); - // Update the LRU cache by shifting pc_desc forward. - for (int i = 0; i < cache_size; i++) { - PcDesc* next = _pc_descs[i]; - _pc_descs[i] = pc_desc; - pc_desc = next; - } -} - -// adjust pcs_size so that it is a multiple of both oopSize and -// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple -// of oopSize, then 2*sizeof(PcDesc) is) -static int adjust_pcs_size(int pcs_size) { - int nsize = round_to(pcs_size, oopSize); - if ((nsize % sizeof(PcDesc)) != 0) { - nsize = pcs_size + sizeof(PcDesc); - } - assert((nsize % oopSize) == 0, "correct alignment"); - return nsize; -} - -//----------------------------------------------------------------------------- - - -void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) { - assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock"); - assert(new_entry != NULL,"Must be non null"); - assert(new_entry->next() == NULL, "Must be null"); - - if (exception_cache() != NULL) { - new_entry->set_next(exception_cache()); - } - set_exception_cache(new_entry); -} - -void nmethod::remove_from_exception_cache(ExceptionCache* ec) { - ExceptionCache* prev = NULL; - ExceptionCache* curr = exception_cache(); - assert(curr != NULL, "nothing to remove"); - // find the previous and next entry of ec - while (curr != ec) { - prev = curr; - curr = curr->next(); - assert(curr != NULL, "ExceptionCache not found"); - } - // now: curr == ec - ExceptionCache* next = curr->next(); - if (prev == NULL) { - set_exception_cache(next); - } else { - prev->set_next(next); - } - delete curr; -} - - -// public method for accessing the exception cache -// These are the public access methods. -address nmethod::handler_for_exception_and_pc(Handle exception, address pc) { - // We never grab a lock to read the exception cache, so we may - // have false negatives. This is okay, as it can only happen during - // the first few exception lookups for a given nmethod. - ExceptionCache* ec = exception_cache(); - while (ec != NULL) { - address ret_val; - if ((ret_val = ec->match(exception,pc)) != NULL) { - return ret_val; - } - ec = ec->next(); - } - return NULL; -} - - -void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) { - // There are potential race conditions during exception cache updates, so we - // must own the ExceptionCache_lock before doing ANY modifications. Because - // we don't lock during reads, it is possible to have several threads attempt - // to update the cache with the same data. We need to check for already inserted - // copies of the current data before adding it. 
- - MutexLocker ml(ExceptionCache_lock); - ExceptionCache* target_entry = exception_cache_entry_for_exception(exception); - - if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) { - target_entry = new ExceptionCache(exception,pc,handler); - add_exception_cache_entry(target_entry); - } -} - - -//-------------end of code for ExceptionCache-------------- - - -int nmethod::total_size() const { - return - consts_size() + - insts_size() + - stub_size() + - scopes_data_size() + - scopes_pcs_size() + - handler_table_size() + - nul_chk_table_size(); -} - -const char* nmethod::compile_kind() const { - if (is_osr_method()) return "osr"; - if (method() != NULL && is_native_method()) return "c2n"; - return NULL; -} - -// Fill in default values for various flag fields -void nmethod::init_defaults() { - _state = alive; - _marked_for_reclamation = 0; - _has_flushed_dependencies = 0; - _speculatively_disconnected = 0; - _has_unsafe_access = 0; - _has_method_handle_invokes = 0; - _lazy_critical_native = 0; - _has_wide_vectors = 0; - _marked_for_deoptimization = 0; - _lock_count = 0; - _stack_traversal_mark = 0; - _unload_reported = false; // jvmti state - -#ifdef ASSERT - _oops_are_stale = false; -#endif - - _oops_do_mark_link = NULL; - _jmethod_id = NULL; - _osr_link = NULL; - _scavenge_root_link = NULL; - _scavenge_root_state = 0; - _saved_nmethod_link = NULL; - _compiler = NULL; -#ifdef GRAAL - _graal_installed_code = NULL; - _triggered_deoptimizations = NULL; -#endif -#ifdef HAVE_DTRACE_H - _trap_offset = 0; -#endif // def HAVE_DTRACE_H -} - -nmethod* nmethod::new_native_nmethod(methodHandle method, - int compile_id, - CodeBuffer *code_buffer, - int vep_offset, - int frame_complete, - int frame_size, - ByteSize basic_lock_owner_sp_offset, - ByteSize basic_lock_sp_offset, - OopMapSet* oop_maps) { - code_buffer->finalize_oop_references(method); - // create nmethod - nmethod* nm = NULL; - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); +/* + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "code/codeCache.hpp" +#include "code/compiledIC.hpp" +#include "code/dependencies.hpp" +#include "code/nmethod.hpp" +#include "code/scopeDesc.hpp" +#include "compiler/abstractCompiler.hpp" +#include "compiler/compileBroker.hpp" +#include "compiler/compileLog.hpp" +#include "compiler/compilerOracle.hpp" +#include "compiler/disassembler.hpp" +#include "interpreter/bytecode.hpp" +#include "oops/methodData.hpp" +#include "prims/jvmtiRedefineClassesTrace.hpp" +#include "prims/jvmtiImpl.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/sweeper.hpp" +#include "utilities/dtrace.hpp" +#include "utilities/events.hpp" +#include "utilities/xmlstream.hpp" +#ifdef SHARK +#include "shark/sharkCompiler.hpp" +#endif +#ifdef GRAAL +#include "graal/graalJavaAccess.hpp" +#endif + +#ifdef DTRACE_ENABLED + +// Only bother with this argument setup if dtrace is available + +#ifndef USDT2 +HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load, + const char*, int, const char*, int, const char*, int, void*, size_t); + +HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload, + char*, int, char*, int, char*, int); + +#define DTRACE_METHOD_UNLOAD_PROBE(method) \ + { \ + Method* m = (method); \ + if (m != NULL) { \ + Symbol* klass_name = m->klass_name(); \ + Symbol* name = m->name(); \ + Symbol* signature = m->signature(); \ + HS_DTRACE_PROBE6(hotspot, compiled__method__unload, \ + klass_name->bytes(), klass_name->utf8_length(), \ + name->bytes(), name->utf8_length(), \ + signature->bytes(), signature->utf8_length()); \ + } \ + } +#else /* USDT2 */ +#define DTRACE_METHOD_UNLOAD_PROBE(method) \ + { \ + Method* m = (method); \ + if (m != NULL) { \ + Symbol* klass_name = m->klass_name(); \ + Symbol* name = m->name(); \ + Symbol* signature = m->signature(); \ + HOTSPOT_COMPILED_METHOD_UNLOAD( \ + (char *) klass_name->bytes(), klass_name->utf8_length(), \ + (char *) name->bytes(), name->utf8_length(), \ + (char *) signature->bytes(), signature->utf8_length()); \ + } \ + } +#endif /* USDT2 */ + +#else // ndef DTRACE_ENABLED + +#define DTRACE_METHOD_UNLOAD_PROBE(method) + +#endif + +bool nmethod::is_compiled_by_c1() const { + if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing + if (is_native_method()) return false; + return compiler()->is_c1(); +} +bool nmethod::is_compiled_by_graal() const { + if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing + if (is_native_method()) return false; + return compiler()->is_graal(); +} +bool nmethod::is_compiled_by_c2() const { + if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing + if (is_native_method()) return false; + return compiler()->is_c2(); +} +bool nmethod::is_compiled_by_shark() const { + if (is_native_method()) return false; + assert(compiler() != NULL, "must be"); + return compiler()->is_shark(); +} + + + +//--------------------------------------------------------------------------------- +// NMethod statistics +// They are printed under various flags, including: +// PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation. +// (In the latter two cases, they like other stats are printed to the log only.) + +// These variables are put into one block to reduce relocations +// and make it simpler to print from the debugger. 
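+// (Descriptive note: note_nmethod() below tallies the section sizes of one
+// nmethod into these counters; print_nmethod_stats() then prints whichever
+// totals are non-zero.)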
+static +struct nmethod_stats_struct { + int nmethod_count; + int total_size; + int relocation_size; + int consts_size; + int insts_size; + int stub_size; + int scopes_data_size; + int scopes_pcs_size; + int dependencies_size; + int handler_table_size; + int nul_chk_table_size; + int oops_size; + + void note_nmethod(nmethod* nm) { + nmethod_count += 1; + total_size += nm->size(); + relocation_size += nm->relocation_size(); + consts_size += nm->consts_size(); + insts_size += nm->insts_size(); + stub_size += nm->stub_size(); + oops_size += nm->oops_size(); + scopes_data_size += nm->scopes_data_size(); + scopes_pcs_size += nm->scopes_pcs_size(); + dependencies_size += nm->dependencies_size(); + handler_table_size += nm->handler_table_size(); + nul_chk_table_size += nm->nul_chk_table_size(); + } + void print_nmethod_stats() { + if (nmethod_count == 0) return; + tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count); + if (total_size != 0) tty->print_cr(" total in heap = %d", total_size); + if (relocation_size != 0) tty->print_cr(" relocation = %d", relocation_size); + if (consts_size != 0) tty->print_cr(" constants = %d", consts_size); + if (insts_size != 0) tty->print_cr(" main code = %d", insts_size); + if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size); + if (oops_size != 0) tty->print_cr(" oops = %d", oops_size); + if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size); + if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size); + if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size); + if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size); + if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size); + } + + int native_nmethod_count; + int native_total_size; + int native_relocation_size; + int native_insts_size; + int native_oops_size; + void note_native_nmethod(nmethod* nm) { + native_nmethod_count += 1; + native_total_size += nm->size(); + native_relocation_size += nm->relocation_size(); + native_insts_size += nm->insts_size(); + native_oops_size += nm->oops_size(); + } + void print_native_nmethod_stats() { + if (native_nmethod_count == 0) return; + tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count); + if (native_total_size != 0) tty->print_cr(" N. total size = %d", native_total_size); + if (native_relocation_size != 0) tty->print_cr(" N. relocation = %d", native_relocation_size); + if (native_insts_size != 0) tty->print_cr(" N. main code = %d", native_insts_size); + if (native_oops_size != 0) tty->print_cr(" N. 
oops = %d", native_oops_size); + } + + int pc_desc_resets; // number of resets (= number of caches) + int pc_desc_queries; // queries to nmethod::find_pc_desc + int pc_desc_approx; // number of those which have approximate true + int pc_desc_repeats; // number of _pc_descs[0] hits + int pc_desc_hits; // number of LRU cache hits + int pc_desc_tests; // total number of PcDesc examinations + int pc_desc_searches; // total number of quasi-binary search steps + int pc_desc_adds; // number of LUR cache insertions + + void print_pc_stats() { + tty->print_cr("PcDesc Statistics: %d queries, %.2f comparisons per query", + pc_desc_queries, + (double)(pc_desc_tests + pc_desc_searches) + / pc_desc_queries); + tty->print_cr(" caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d", + pc_desc_resets, + pc_desc_queries, pc_desc_approx, + pc_desc_repeats, pc_desc_hits, + pc_desc_tests, pc_desc_searches, pc_desc_adds); + } +} nmethod_stats; + + +//--------------------------------------------------------------------------------- + + +ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) { + assert(pc != NULL, "Must be non null"); + assert(exception.not_null(), "Must be non null"); + assert(handler != NULL, "Must be non null"); + + _count = 0; + _exception_type = exception->klass(); + _next = NULL; + + add_address_and_handler(pc,handler); +} + + +address ExceptionCache::match(Handle exception, address pc) { + assert(pc != NULL,"Must be non null"); + assert(exception.not_null(),"Must be non null"); + if (exception->klass() == exception_type()) { + return (test_address(pc)); + } + + return NULL; +} + + +bool ExceptionCache::match_exception_with_space(Handle exception) { + assert(exception.not_null(),"Must be non null"); + if (exception->klass() == exception_type() && count() < cache_size) { + return true; + } + return false; +} + + +address ExceptionCache::test_address(address addr) { + for (int i=0; imatch_exception_with_space(exception)) { + return ec; + } + ec = ec->next(); + } + return NULL; +} + + +//----------------------------------------------------------------------------- + + +// Helper used by both find_pc_desc methods. +static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) { + NOT_PRODUCT(++nmethod_stats.pc_desc_tests); + if (!approximate) + return pc->pc_offset() == pc_offset; + else + return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset(); +} + +void PcDescCache::reset_to(PcDesc* initial_pc_desc) { + if (initial_pc_desc == NULL) { + _pc_descs[0] = NULL; // native method; no PcDescs at all + return; + } + NOT_PRODUCT(++nmethod_stats.pc_desc_resets); + // reset the cache by filling it with benign (non-null) values + assert(initial_pc_desc->pc_offset() < 0, "must be sentinel"); + for (int i = 0; i < cache_size; i++) + _pc_descs[i] = initial_pc_desc; +} + +PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) { + NOT_PRODUCT(++nmethod_stats.pc_desc_queries); + NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx); + + // Note: one might think that caching the most recently + // read value separately would be a win, but one would be + // wrong. When many threads are updating it, the cache + // line it's in would bounce between caches, negating + // any benefit. + + // In order to prevent race conditions do not load cache elements + // repeatedly, but use a local copy: + PcDesc* res; + + // Step one: Check the most recently added value. 
+ res = _pc_descs[0]; + if (res == NULL) return NULL; // native method; no PcDescs at all + if (match_desc(res, pc_offset, approximate)) { + NOT_PRODUCT(++nmethod_stats.pc_desc_repeats); + return res; + } + + // Step two: Check the rest of the LRU cache. + for (int i = 1; i < cache_size; ++i) { + res = _pc_descs[i]; + if (res->pc_offset() < 0) break; // optimization: skip empty cache + if (match_desc(res, pc_offset, approximate)) { + NOT_PRODUCT(++nmethod_stats.pc_desc_hits); + return res; + } + } + + // Report failure. + return NULL; +} + +void PcDescCache::add_pc_desc(PcDesc* pc_desc) { + NOT_PRODUCT(++nmethod_stats.pc_desc_adds); + // Update the LRU cache by shifting pc_desc forward. + for (int i = 0; i < cache_size; i++) { + PcDesc* next = _pc_descs[i]; + _pc_descs[i] = pc_desc; + pc_desc = next; + } +} + +// adjust pcs_size so that it is a multiple of both oopSize and +// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple +// of oopSize, then 2*sizeof(PcDesc) is) +static int adjust_pcs_size(int pcs_size) { + int nsize = round_to(pcs_size, oopSize); + if ((nsize % sizeof(PcDesc)) != 0) { + nsize = pcs_size + sizeof(PcDesc); + } + assert((nsize % oopSize) == 0, "correct alignment"); + return nsize; +} + +//----------------------------------------------------------------------------- + + +void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) { + assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock"); + assert(new_entry != NULL,"Must be non null"); + assert(new_entry->next() == NULL, "Must be null"); + + if (exception_cache() != NULL) { + new_entry->set_next(exception_cache()); + } + set_exception_cache(new_entry); +} + +void nmethod::remove_from_exception_cache(ExceptionCache* ec) { + ExceptionCache* prev = NULL; + ExceptionCache* curr = exception_cache(); + assert(curr != NULL, "nothing to remove"); + // find the previous and next entry of ec + while (curr != ec) { + prev = curr; + curr = curr->next(); + assert(curr != NULL, "ExceptionCache not found"); + } + // now: curr == ec + ExceptionCache* next = curr->next(); + if (prev == NULL) { + set_exception_cache(next); + } else { + prev->set_next(next); + } + delete curr; +} + + +// public method for accessing the exception cache +// These are the public access methods. +address nmethod::handler_for_exception_and_pc(Handle exception, address pc) { + // We never grab a lock to read the exception cache, so we may + // have false negatives. This is okay, as it can only happen during + // the first few exception lookups for a given nmethod. + ExceptionCache* ec = exception_cache(); + while (ec != NULL) { + address ret_val; + if ((ret_val = ec->match(exception,pc)) != NULL) { + return ret_val; + } + ec = ec->next(); + } + return NULL; +} + + +void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) { + // There are potential race conditions during exception cache updates, so we + // must own the ExceptionCache_lock before doing ANY modifications. Because + // we don't lock during reads, it is possible to have several threads attempt + // to update the cache with the same data. We need to check for already inserted + // copies of the current data before adding it. 
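+  // The shape of the update below: take ExceptionCache_lock, re-probe for an
+  // entry that can still absorb this (pc, handler) pair, and allocate a fresh
+  // ExceptionCache entry only when no existing one has room.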
+ + MutexLocker ml(ExceptionCache_lock); + ExceptionCache* target_entry = exception_cache_entry_for_exception(exception); + + if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) { + target_entry = new ExceptionCache(exception,pc,handler); + add_exception_cache_entry(target_entry); + } +} + + +//-------------end of code for ExceptionCache-------------- + + +int nmethod::total_size() const { + return + consts_size() + + insts_size() + + stub_size() + + scopes_data_size() + + scopes_pcs_size() + + handler_table_size() + + nul_chk_table_size(); +} + +const char* nmethod::compile_kind() const { + if (is_osr_method()) return "osr"; + if (method() != NULL && is_native_method()) return "c2n"; + return NULL; +} + +// Fill in default values for various flag fields +void nmethod::init_defaults() { + _state = alive; + _marked_for_reclamation = 0; + _has_flushed_dependencies = 0; + _speculatively_disconnected = 0; + _has_unsafe_access = 0; + _has_method_handle_invokes = 0; + _lazy_critical_native = 0; + _has_wide_vectors = 0; + _marked_for_deoptimization = 0; + _lock_count = 0; + _stack_traversal_mark = 0; + _unload_reported = false; // jvmti state + +#ifdef ASSERT + _oops_are_stale = false; +#endif + + _oops_do_mark_link = NULL; + _jmethod_id = NULL; + _osr_link = NULL; + _scavenge_root_link = NULL; + _scavenge_root_state = 0; + _saved_nmethod_link = NULL; + _compiler = NULL; +#ifdef GRAAL + _graal_installed_code = NULL; + _triggered_deoptimizations = NULL; +#endif +#ifdef HAVE_DTRACE_H + _trap_offset = 0; +#endif // def HAVE_DTRACE_H +} + +nmethod* nmethod::new_native_nmethod(methodHandle method, + int compile_id, + CodeBuffer *code_buffer, + int vep_offset, + int frame_complete, + int frame_size, + ByteSize basic_lock_owner_sp_offset, + ByteSize basic_lock_sp_offset, + OopMapSet* oop_maps) { + code_buffer->finalize_oop_references(method); + // create nmethod + nmethod* nm = NULL; + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); if (CodeCache::has_space(native_nmethod_size)) { CodeOffsets offsets; offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); @@ -523,2508 +523,2508 @@ if (PrintAssembly && nm != NULL) Disassembler::decode(nm); } - } - // verify nmethod - debug_only(if (nm) nm->verify();) // might block - - if (nm != NULL) { - nm->log_new_nmethod(); - } - - return nm; -} - -#ifdef HAVE_DTRACE_H -nmethod* nmethod::new_dtrace_nmethod(methodHandle method, - CodeBuffer *code_buffer, - int vep_offset, - int trap_offset, - int frame_complete, - int frame_size) { - code_buffer->finalize_oop_references(method); - // create nmethod - nmethod* nm = NULL; - { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - int nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); + } + // verify nmethod + debug_only(if (nm) nm->verify();) // might block + + if (nm != NULL) { + nm->log_new_nmethod(); + } + + return nm; +} + +#ifdef HAVE_DTRACE_H +nmethod* nmethod::new_dtrace_nmethod(methodHandle method, + CodeBuffer *code_buffer, + int vep_offset, + int trap_offset, + int frame_complete, + int frame_size) { + code_buffer->finalize_oop_references(method); + // create nmethod + nmethod* nm = NULL; + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + int nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); if (CodeCache::has_space(nmethod_size)) { CodeOffsets offsets; offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); 
offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset); offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); - + nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size); - + if (nm != NULL) nmethod_stats.note_nmethod(nm); if (PrintAssembly && nm != NULL) Disassembler::decode(nm); } - } - // verify nmethod - debug_only(if (nm) nm->verify();) // might block - - if (nm != NULL) { - nm->log_new_nmethod(); - } - - return nm; -} - -#endif // def HAVE_DTRACE_H - -nmethod* nmethod::new_nmethod(methodHandle method, - int compile_id, - int entry_bci, - CodeOffsets* offsets, - int orig_pc_offset, - DebugInformationRecorder* debug_info, - Dependencies* dependencies, - CodeBuffer* code_buffer, int frame_size, - OopMapSet* oop_maps, - ExceptionHandlerTable* handler_table, - ImplicitExceptionTable* nul_chk_table, - AbstractCompiler* compiler, - int comp_level, - GrowableArray* leaf_graph_ids -#ifdef GRAAL - , Handle installed_code, - Handle triggered_deoptimizations -#endif -) -{ - assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); - code_buffer->finalize_oop_references(method); - int leaf_graph_ids_size = leaf_graph_ids == NULL ? 0 : round_to(sizeof(jlong) * leaf_graph_ids->length(), oopSize); - // create nmethod - nmethod* nm = NULL; - { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - int nmethod_size = - allocation_size(code_buffer, sizeof(nmethod)) - + adjust_pcs_size(debug_info->pcs_size()) - + round_to(dependencies->size_in_bytes() , oopSize) - + round_to(handler_table->size_in_bytes(), oopSize) - + round_to(nul_chk_table->size_in_bytes(), oopSize) - + round_to(debug_info->data_size() , oopSize) + } + // verify nmethod + debug_only(if (nm) nm->verify();) // might block + + if (nm != NULL) { + nm->log_new_nmethod(); + } + + return nm; +} + +#endif // def HAVE_DTRACE_H + +nmethod* nmethod::new_nmethod(methodHandle method, + int compile_id, + int entry_bci, + CodeOffsets* offsets, + int orig_pc_offset, + DebugInformationRecorder* debug_info, + Dependencies* dependencies, + CodeBuffer* code_buffer, int frame_size, + OopMapSet* oop_maps, + ExceptionHandlerTable* handler_table, + ImplicitExceptionTable* nul_chk_table, + AbstractCompiler* compiler, + int comp_level, + GrowableArray* leaf_graph_ids +#ifdef GRAAL + , Handle installed_code, + Handle triggered_deoptimizations +#endif +) +{ + assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); + code_buffer->finalize_oop_references(method); + int leaf_graph_ids_size = leaf_graph_ids == NULL ? 
0 : round_to(sizeof(jlong) * leaf_graph_ids->length(), oopSize); + // create nmethod + nmethod* nm = NULL; + { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + int nmethod_size = + allocation_size(code_buffer, sizeof(nmethod)) + + adjust_pcs_size(debug_info->pcs_size()) + + round_to(dependencies->size_in_bytes() , oopSize) + + round_to(handler_table->size_in_bytes(), oopSize) + + round_to(nul_chk_table->size_in_bytes(), oopSize) + + round_to(debug_info->data_size() , oopSize) + leaf_graph_ids_size; - if (CodeCache::has_space(nmethod_size)) { - nm = new (nmethod_size) - nmethod(method(), nmethod_size, compile_id, entry_bci, offsets, - orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, - oop_maps, - handler_table, - nul_chk_table, - compiler, - comp_level, - leaf_graph_ids -#ifdef GRAAL - , installed_code, - triggered_deoptimizations -#endif + if (CodeCache::has_space(nmethod_size)) { + nm = new (nmethod_size) + nmethod(method(), nmethod_size, compile_id, entry_bci, offsets, + orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, + oop_maps, + handler_table, + nul_chk_table, + compiler, + comp_level, + leaf_graph_ids +#ifdef GRAAL + , installed_code, + triggered_deoptimizations +#endif ); - } - if (nm != NULL) { - // To make dependency checking during class loading fast, record - // the nmethod dependencies in the classes it is dependent on. - // This allows the dependency checking code to simply walk the - // class hierarchy above the loaded class, checking only nmethods - // which are dependent on those classes. The slow way is to - // check every nmethod for dependencies which makes it linear in - // the number of methods compiled. For applications with a lot - // classes the slow way is too slow. - for (Dependencies::DepStream deps(nm); deps.next(); ) { - Klass* klass = deps.context_type(); - if (klass == NULL) continue; // ignore things like evol_method - - // record this nmethod as dependent on this klass - InstanceKlass::cast(klass)->add_dependent_nmethod(nm); - } - } - if (nm != NULL) nmethod_stats.note_nmethod(nm); - if (PrintAssembly && nm != NULL) - Disassembler::decode(nm); - } - - // verify nmethod - debug_only(if (nm) nm->verify();) // might block - - if (nm != NULL) { - nm->log_new_nmethod(); - } - - // done - return nm; -} - - -// For native wrappers -nmethod::nmethod( - Method* method, - int nmethod_size, - int compile_id, - CodeOffsets* offsets, - CodeBuffer* code_buffer, - int frame_size, - ByteSize basic_lock_owner_sp_offset, - ByteSize basic_lock_sp_offset, - OopMapSet* oop_maps ) - : CodeBlob("native nmethod", code_buffer, sizeof(nmethod), - nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps), - _native_receiver_sp_offset(basic_lock_owner_sp_offset), - _native_basic_lock_sp_offset(basic_lock_sp_offset) -{ - { - debug_only(No_Safepoint_Verifier nsv;) - assert_locked_or_safepoint(CodeCache_lock); - - init_defaults(); - _method = method; - _entry_bci = InvocationEntryBci; - // We have no exception handler or deopt handler make the - // values something that will never match a pc like the nmethod vtable entry - _exception_offset = 0; - _deoptimize_offset = 0; - _deoptimize_mh_offset = 0; - _orig_pc_offset = 0; - - _consts_offset = data_offset(); - _stub_offset = data_offset(); - _oops_offset = data_offset(); - _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize); - _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize); - 
_scopes_pcs_offset       = _scopes_data_offset;
-    _dependencies_offset     = _scopes_pcs_offset;
-    _handler_table_offset    = _dependencies_offset;
-    _nul_chk_table_offset    = _handler_table_offset;
-    _leaf_graph_ids_offset   = _nul_chk_table_offset;
-    _nmethod_end_offset      = _leaf_graph_ids_offset;
-    _compile_id              = compile_id;
-    _comp_level              = CompLevel_none;
-    _entry_point             = code_begin() + offsets->value(CodeOffsets::Entry);
-    _verified_entry_point    = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point         = NULL;
-    _exception_cache         = NULL;
-    _pc_desc_cache.reset_to(NULL);
-
-    code_buffer->copy_values_to(this);
-    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
-      CodeCache::add_scavenge_root_nmethod(this);
-    }
-    debug_only(verify_scavenge_root_oops());
-    CodeCache::commit(this);
-  }
-
-  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
-    ttyLocker ttyl;  // keep the following output all in one block
-    // This output goes directly to the tty, not the compiler log.
-    // To enable tools to match it up with the compilation activity,
-    // be sure to tag this tty output with the compile ID.
-    if (xtty != NULL) {
-      xtty->begin_head("print_native_nmethod");
-      xtty->method(_method);
-      xtty->stamp();
-      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
-    }
-    // print the header part first
-    print();
-    // then print the requested information
-    if (PrintNativeNMethods) {
-      print_code();
-      if (oop_maps != NULL) {
-        oop_maps->print();
-      }
-    }
-    if (PrintRelocations) {
-      print_relocations();
-    }
-    if (xtty != NULL) {
-      xtty->tail("print_native_nmethod");
-    }
-  }
-}
-
-// For dtrace wrappers
-#ifdef HAVE_DTRACE_H
-nmethod::nmethod(
-  Method* method,
-  int nmethod_size,
-  CodeOffsets* offsets,
-  CodeBuffer* code_buffer,
-  int frame_size)
-  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
-  _native_receiver_sp_offset(in_ByteSize(-1)),
-  _native_basic_lock_sp_offset(in_ByteSize(-1))
-{
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-    assert_locked_or_safepoint(CodeCache_lock);
-
-    init_defaults();
-    _method = method;
-    _entry_bci = InvocationEntryBci;
-    // We have no exception handler or deopt handler; make the
-    // values something that will never match a pc, like the nmethod vtable entry
-    _exception_offset = 0;
-    _deoptimize_offset = 0;
-    _deoptimize_mh_offset = 0;
-    _unwind_handler_offset = -1;
-    _trap_offset = offsets->value(CodeOffsets::Dtrace_trap);
-    _orig_pc_offset = 0;
-    _consts_offset = data_offset();
-    _stub_offset = data_offset();
-    _oops_offset = data_offset();
-    _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
-    _scopes_pcs_offset = _scopes_data_offset;
-    _dependencies_offset = _scopes_pcs_offset;
-    _handler_table_offset = _dependencies_offset;
-    _nul_chk_table_offset = _handler_table_offset;
-    _nmethod_end_offset = _nul_chk_table_offset;
-    _compile_id = 0;  // default
-    _comp_level = CompLevel_none;
-    _entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
-    _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point = NULL;
-    _exception_cache = NULL;
-    _pc_desc_cache.reset_to(NULL);
-
-    code_buffer->copy_values_to(this);
-    debug_only(verify_scavenge_root_oops());
-    CodeCache::commit(this);
-  }
-
-  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
-    ttyLocker ttyl;  // keep the following output all in one block
-    // This output goes directly to the tty, not the compiler log.
-    // To enable tools to match it up with the compilation activity,
-    // be sure to tag this tty output with the compile ID.
-    if (xtty != NULL) {
-      xtty->begin_head("print_dtrace_nmethod");
-      xtty->method(_method);
-      xtty->stamp();
-      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
-    }
-    // print the header part first
-    print();
-    // then print the requested information
-    if (PrintNMethods) {
-      print_code();
-    }
-    if (PrintRelocations) {
-      print_relocations();
-    }
-    if (xtty != NULL) {
-      xtty->tail("print_dtrace_nmethod");
-    }
-  }
-}
-#endif // def HAVE_DTRACE_H
-
-void* nmethod::operator new(size_t size, int nmethod_size) {
+    }
+    if (nm != NULL) {
+      // To make dependency checking during class loading fast, record
+      // the nmethod dependencies in the classes it is dependent on.
+      // This allows the dependency checking code to simply walk the
+      // class hierarchy above the loaded class, checking only nmethods
+      // which are dependent on those classes.  The slow way is to
+      // check every nmethod for dependencies which makes it linear in
+      // the number of methods compiled.  For applications with a lot
+      // of classes the slow way is too slow.
+      for (Dependencies::DepStream deps(nm); deps.next(); ) {
+        Klass* klass = deps.context_type();
+        if (klass == NULL) continue;  // ignore things like evol_method
+
+        // record this nmethod as dependent on this klass
+        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
+      }
+    }
+    if (nm != NULL) nmethod_stats.note_nmethod(nm);
+    if (PrintAssembly && nm != NULL)
+      Disassembler::decode(nm);
+  }
+
+  // verify nmethod
+  debug_only(if (nm) nm->verify();)  // might block
+
+  if (nm != NULL) {
+    nm->log_new_nmethod();
+  }
+
+  // done
+  return nm;
+}
+
+
+// For native wrappers
+nmethod::nmethod(
+  Method* method,
+  int nmethod_size,
+  int compile_id,
+  CodeOffsets* offsets,
+  CodeBuffer* code_buffer,
+  int frame_size,
+  ByteSize basic_lock_owner_sp_offset,
+  ByteSize basic_lock_sp_offset,
+  OopMapSet* oop_maps )
+  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
+             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
+  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
+  _native_basic_lock_sp_offset(basic_lock_sp_offset)
+{
+  {
+    debug_only(No_Safepoint_Verifier nsv;)
+    assert_locked_or_safepoint(CodeCache_lock);
+
+    init_defaults();
+    _method = method;
+    _entry_bci = InvocationEntryBci;
+    // We have no exception handler or deopt handler; make the
+    // values something that will never match a pc, like the nmethod vtable entry
+    _exception_offset = 0;
+    _deoptimize_offset = 0;
+    _deoptimize_mh_offset = 0;
+    _orig_pc_offset = 0;
+
+    _consts_offset = data_offset();
+    _stub_offset = data_offset();
+    _oops_offset = data_offset();
+    _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
+    _scopes_pcs_offset = _scopes_data_offset;
+    _dependencies_offset = _scopes_pcs_offset;
+    _handler_table_offset = _dependencies_offset;
+    _nul_chk_table_offset = _handler_table_offset;
+    _leaf_graph_ids_offset = _nul_chk_table_offset;
+    _nmethod_end_offset = _leaf_graph_ids_offset;
+    _compile_id = compile_id;
+    _comp_level = CompLevel_none;
+    _entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
+    _osr_entry_point = NULL;
+    _exception_cache = NULL;
+    _pc_desc_cache.reset_to(NULL);
+
+    code_buffer->copy_values_to(this);
+    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
+      CodeCache::add_scavenge_root_nmethod(this);
+    }
+    debug_only(verify_scavenge_root_oops());
+    CodeCache::commit(this);
+  }
+
+  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
+    ttyLocker ttyl;  // keep the following output all in one block
+    // This output goes directly to the tty, not the compiler log.
+    // To enable tools to match it up with the compilation activity,
+    // be sure to tag this tty output with the compile ID.
+    if (xtty != NULL) {
+      xtty->begin_head("print_native_nmethod");
+      xtty->method(_method);
+      xtty->stamp();
+      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+    }
+    // print the header part first
+    print();
+    // then print the requested information
+    if (PrintNativeNMethods) {
+      print_code();
+      if (oop_maps != NULL) {
+        oop_maps->print();
+      }
+    }
+    if (PrintRelocations) {
+      print_relocations();
+    }
+    if (xtty != NULL) {
+      xtty->tail("print_native_nmethod");
+    }
+  }
+}
+
+// For dtrace wrappers
+#ifdef HAVE_DTRACE_H
+nmethod::nmethod(
+  Method* method,
+  int nmethod_size,
+  CodeOffsets* offsets,
+  CodeBuffer* code_buffer,
+  int frame_size)
+  : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
+             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
+  _native_receiver_sp_offset(in_ByteSize(-1)),
+  _native_basic_lock_sp_offset(in_ByteSize(-1))
+{
+  {
+    debug_only(No_Safepoint_Verifier nsv;)
+    assert_locked_or_safepoint(CodeCache_lock);
+
+    init_defaults();
+    _method = method;
+    _entry_bci = InvocationEntryBci;
+    // We have no exception handler or deopt handler; make the
+    // values something that will never match a pc, like the nmethod vtable entry
+    _exception_offset = 0;
+    _deoptimize_offset = 0;
+    _deoptimize_mh_offset = 0;
+    _unwind_handler_offset = -1;
+    _trap_offset = offsets->value(CodeOffsets::Dtrace_trap);
+    _orig_pc_offset = 0;
+    _consts_offset = data_offset();
+    _stub_offset = data_offset();
+    _oops_offset = data_offset();
+    _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
+    _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
+    _scopes_pcs_offset = _scopes_data_offset;
+    _dependencies_offset = _scopes_pcs_offset;
+    _handler_table_offset = _dependencies_offset;
+    _nul_chk_table_offset = _handler_table_offset;
+    _nmethod_end_offset = _nul_chk_table_offset;
+    _compile_id = 0;  // default
+    _comp_level = CompLevel_none;
+    _entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
+    _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
+    _osr_entry_point = NULL;
+    _exception_cache = NULL;
+    _pc_desc_cache.reset_to(NULL);
+
+    code_buffer->copy_values_to(this);
+    debug_only(verify_scavenge_root_oops());
+    CodeCache::commit(this);
+  }
+
+  if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
+    ttyLocker ttyl;  // keep the following output all in one block
+    // This output goes directly to the tty, not the compiler log.
+    // To enable tools to match it up with the compilation activity,
+    // be sure to tag this tty output with the compile ID.
+    if (xtty != NULL) {
+      xtty->begin_head("print_dtrace_nmethod");
+      xtty->method(_method);
+      xtty->stamp();
+      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+    }
+    // print the header part first
+    print();
+    // then print the requested information
+    if (PrintNMethods) {
+      print_code();
+    }
+    if (PrintRelocations) {
+      print_relocations();
+    }
+    if (xtty != NULL) {
+      xtty->tail("print_dtrace_nmethod");
+    }
+  }
+}
+#endif // def HAVE_DTRACE_H
+
+void* nmethod::operator new(size_t size, int nmethod_size) {
   void* alloc = CodeCache::allocate(nmethod_size);
   guarantee(alloc != NULL, "CodeCache should have enough space");
   return alloc;
-}
-
-
-nmethod::nmethod(
-  Method* method,
-  int nmethod_size,
-  int compile_id,
-  int entry_bci,
-  CodeOffsets* offsets,
-  int orig_pc_offset,
-  DebugInformationRecorder* debug_info,
-  Dependencies* dependencies,
-  CodeBuffer *code_buffer,
-  int frame_size,
-  OopMapSet* oop_maps,
-  ExceptionHandlerTable* handler_table,
-  ImplicitExceptionTable* nul_chk_table,
-  AbstractCompiler* compiler,
-  int comp_level,
-  GrowableArray<jlong>* leaf_graph_ids
-#ifdef GRAAL
-  , Handle installed_code,
-  Handle triggered_deoptimizations
-#endif
-  )
-  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
-             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
-  _native_receiver_sp_offset(in_ByteSize(-1)),
-  _native_basic_lock_sp_offset(in_ByteSize(-1))
-{
-  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
-  {
-    debug_only(No_Safepoint_Verifier nsv;)
-    assert_locked_or_safepoint(CodeCache_lock);
-
-    init_defaults();
-    _method = method;
-    _entry_bci = entry_bci;
-    _compile_id = compile_id;
-    _comp_level = comp_level;
-    _compiler = compiler;
-    _orig_pc_offset = orig_pc_offset;
-
-    // Section offsets
-    _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
-    _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
-
-#ifdef GRAAL
-    _graal_installed_code = installed_code();
-    _triggered_deoptimizations = (typeArrayOop)triggered_deoptimizations();
-#endif
-    if (compiler->is_graal()) {
-      // Graal might not produce any stub sections
-      if (offsets->value(CodeOffsets::Exceptions) != -1) {
-        _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
-      } else {
-        _exception_offset = -1;
-      }
-      if (offsets->value(CodeOffsets::Deopt) != -1) {
-        _deoptimize_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
-      } else {
-        _deoptimize_offset = -1;
-      }
-      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
-        _deoptimize_mh_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH);
-      } else {
-        _deoptimize_mh_offset = -1;
-      }
-    } else {
-      // Exception handler and deopt handler are in the stub section
-      assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
-      assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
-
-      _exception_offset  = _stub_offset + offsets->value(CodeOffsets::Exceptions);
-      _deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
-      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
-        _deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
-      } else {
-        _deoptimize_mh_offset = -1;
-      }
-    }
-    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
-      _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
-    } else {
-      _unwind_handler_offset = -1;
-    }
-
-    int leaf_graph_ids_size = leaf_graph_ids == NULL ? 0 : round_to(sizeof(jlong) * leaf_graph_ids->length(), oopSize);
-
-    _oops_offset = data_offset();
-    _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
-    _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
-
-    _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size(), oopSize);
-    _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
-    _handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes(), oopSize);
-    _nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
-    _leaf_graph_ids_offset = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
-    _nmethod_end_offset = _leaf_graph_ids_offset + leaf_graph_ids_size;
-
-    _entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
-    _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
-    _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
-    _exception_cache = NULL;
-    _pc_desc_cache.reset_to(scopes_pcs_begin());
-
-    // Copy contents of ScopeDescRecorder to nmethod
-    code_buffer->copy_values_to(this);
-    debug_info->copy_to(this);
-    dependencies->copy_to(this);
-    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
-      CodeCache::add_scavenge_root_nmethod(this);
-    }
-    debug_only(verify_scavenge_root_oops());
-
-    CodeCache::commit(this);
-
-    // Copy contents of ExceptionHandlerTable to nmethod
-    handler_table->copy_to(this);
-    nul_chk_table->copy_to(this);
-
-    if (leaf_graph_ids != NULL && leaf_graph_ids_size > 0) {
-      memcpy(leaf_graph_ids_begin(), leaf_graph_ids->adr_at(0), leaf_graph_ids_size);
-    }
-
-    // we use the information of entry points to find out if a method is
-    // static or non-static
-    assert(compiler->is_c2() ||
-           _method->is_static() == (entry_point() == _verified_entry_point),
-           " entry points must be same for static methods and vice versa");
-  }
-
-  bool printnmethods = PrintNMethods
-    || CompilerOracle::should_print(_method)
-    || CompilerOracle::has_option_string(_method, "PrintNMethods");
-  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
-    print_nmethod(printnmethods);
-  }
-}
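// [Editorial sketch, not part of this changeset.] The constructors above lay
// out the nmethod body as contiguous sections: each section's offset is
// chained from the previous one, rounded up to an alignment boundary, so a
// section's size is simply the distance to the next offset and empty sections
// collapse to a shared offset. A minimal standalone illustration of the
// technique (the sizes below are hypothetical, not taken from the patch):
static int round_up(int x, int unit) {      // assumes unit is a power of two,
  return (x + unit - 1) & ~(unit - 1);      // as oopSize and wordSize are
}

static void layout_example() {
  const int oopSize = 8, wordSize = 8;
  int oops_offset        = 0;                                        // data_offset() in the real code
  int metadata_offset    = oops_offset     + round_up(40, oopSize);  // 40 = oop bytes (made up)
  int scopes_data_offset = metadata_offset + round_up(24, wordSize); // 24 = metadata bytes (made up)
  int nmethod_end_offset = scopes_data_offset;                       // empty tail sections share an offset
  (void) nmethod_end_offset;
}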
-
-// Print a short set of xml attributes to identify this nmethod.  The
-// output should be embedded in some other element.
-void nmethod::log_identity(xmlStream* log) const {
-  log->print(" compile_id='%d'", compile_id());
-  const char* nm_kind = compile_kind();
-  if (nm_kind != NULL) log->print(" compile_kind='%s'", nm_kind);
-  if (compiler() != NULL) {
-    log->print(" compiler='%s'", compiler()->name());
-  }
-  if (TieredCompilation) {
-    log->print(" level='%d'", comp_level());
-  }
-}
-
-
-#define LOG_OFFSET(log, name)                          \
-  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
-    log->print(" " XSTR(name) "_offset='%d'" ,          \
-               (intptr_t)name##_begin() - (intptr_t)this)
-
-
-void nmethod::log_new_nmethod() const {
-  if (LogCompilation && xtty != NULL) {
-    ttyLocker ttyl;
-    HandleMark hm;
-    xtty->begin_elem("nmethod");
-    log_identity(xtty);
-    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
-    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);
-
-    LOG_OFFSET(xtty, relocation);
-    LOG_OFFSET(xtty, consts);
-    LOG_OFFSET(xtty, insts);
-    LOG_OFFSET(xtty, stub);
-    LOG_OFFSET(xtty, scopes_data);
-    LOG_OFFSET(xtty, scopes_pcs);
-    LOG_OFFSET(xtty, dependencies);
-    LOG_OFFSET(xtty, handler_table);
-    LOG_OFFSET(xtty, nul_chk_table);
-    LOG_OFFSET(xtty, oops);
-
-    xtty->method(method());
-    xtty->stamp();
-    xtty->end_elem();
-  }
-}
-
-#undef LOG_OFFSET
-
-
-// Print out more verbose output usually for a newly created nmethod.
-void nmethod::print_on(outputStream* st, const char* msg) const {
-  if (st != NULL) {
-    ttyLocker ttyl;
-    if (WizardMode) {
-      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
-      st->print_cr(" (" INTPTR_FORMAT ")", this);
-    } else {
-      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
-    }
-  }
-}
-
-
-void nmethod::print_nmethod(bool printmethod) {
-  ttyLocker ttyl;  // keep the following output all in one block
-  if (xtty != NULL) {
-    xtty->begin_head("print_nmethod");
-    xtty->stamp();
-    xtty->end_head();
-  }
-  // print the header part first
-  print();
-  // then print the requested information
-  if (printmethod) {
-    print_code();
-    print_pcs();
-    if (oop_maps()) {
-      oop_maps()->print();
-    }
-  }
-  if (PrintDebugInfo) {
-    print_scopes();
-  }
-  if (PrintRelocations) {
-    print_relocations();
-  }
-  if (PrintDependencies) {
-    print_dependencies();
-  }
-  if (PrintExceptionHandlers) {
-    print_handler_table();
-    print_nul_chk_table();
-  }
-  if (xtty != NULL) {
-    xtty->tail("print_nmethod");
-  }
-}
-
-
-// Promote one word from an assembly-time handle to a live embedded oop.
-inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
-  if (handle == NULL ||
-      // As a special case, IC oops are initialized to 1 or -1.
-      handle == (jobject) Universe::non_oop_word()) {
-    (*dest) = (oop) handle;
-  } else {
-    (*dest) = JNIHandles::resolve_non_null(handle);
-  }
-}
-
-
-// Have to have the same name because it's called by a template
-void nmethod::copy_values(GrowableArray<jobject>* array) {
-  int length = array->length();
-  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
-  oop* dest = oops_begin();
-  for (int index = 0 ; index < length; index++) {
-    initialize_immediate_oop(&dest[index], array->at(index));
-  }
-
-  // Now we can fix up all the oops in the code.  We need to do this
-  // in the code because the assembler uses jobjects as placeholders.
-  // The code and relocations have already been initialized by the
-  // CodeBlob constructor, so it is valid even at this early point to
-  // iterate over relocations and patch the code.
-  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
-}
-
-void nmethod::copy_values(GrowableArray<Metadata*>* array) {
-  int length = array->length();
-  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
-  Metadata** dest = metadata_begin();
-  for (int index = 0 ; index < length; index++) {
-    dest[index] = array->at(index);
-  }
-}
-
-bool nmethod::is_at_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::poll_return_type)
-      return true;
-  }
-  return false;
-}
-
-
-bool nmethod::is_at_poll_or_poll_return(address pc) {
-  RelocIterator iter(this, pc, pc+1);
-  while (iter.next()) {
-    relocInfo::relocType t = iter.type();
-    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
-      return true;
-  }
-  return false;
-}
-
-
-void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
-  // re-patch all oop-bearing instructions, just in case some oops moved
-  RelocIterator iter(this, begin, end);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* reloc = iter.oop_reloc();
-      if (initialize_immediates && reloc->oop_is_immediate()) {
-        oop* dest = reloc->oop_addr();
-        initialize_immediate_oop(dest, (jobject) *dest);
-      }
-      // Refresh the oop-related bits of this instruction.
-      reloc->fix_oop_relocation();
-    } else if (iter.type() == relocInfo::metadata_type) {
-      metadata_Relocation* reloc = iter.metadata_reloc();
-      reloc->fix_metadata_relocation();
-    }
-
-    // There must not be any interfering patches or breakpoints.
-    assert(!(iter.type() == relocInfo::breakpoint_type
-             && iter.breakpoint_reloc()->active()),
-           "no active breakpoint");
-  }
-}
-
-
-void nmethod::verify_oop_relocations() {
-  // Ensure that the code matches the current oop values
-  RelocIterator iter(this, NULL, NULL);
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type) {
-      oop_Relocation* reloc = iter.oop_reloc();
-      if (!reloc->oop_is_immediate()) {
-        reloc->verify_oop_relocation();
-      }
-    }
-  }
-}
-
-
-ScopeDesc* nmethod::scope_desc_at(address pc) {
-  PcDesc* pd = pc_desc_at(pc);
-  guarantee(pd != NULL, "scope must be present");
-  return new ScopeDesc(this, pd->scope_decode_offset(),
-                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
-                       pd->return_oop());
-}
-
-
-void nmethod::clear_inline_caches() {
-  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
-  if (is_zombie()) {
-    return;
-  }
-
-  RelocIterator iter(this);
-  while (iter.next()) {
-    iter.reloc()->clear_inline_cache();
-  }
-}
-
-
-void nmethod::cleanup_inline_caches() {
-
-  assert_locked_or_safepoint(CompiledIC_lock);
-
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (!is_in_use()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // This means that the low_boundary is going to be a little too high.
-    // This shouldn't matter, since oops of non-entrant methods are never used.
-    // In fact, why are we bothering to look at oops in a non-entrant method??
-  }
-
-  // Find all calls in an nmethod, and clear the ones that point to zombie methods
-  ResourceMark rm;
-  RelocIterator iter(this, low_boundary);
-  while(iter.next()) {
-    switch(iter.type()) {
-      case relocInfo::virtual_call_type:
-      case relocInfo::opt_virtual_call_type: {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
-        // Ok to look up references to zombies here
-        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
-        }
-        break;
-      }
-      case relocInfo::static_call_type: {
-        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
-        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
-        if( cb != NULL && cb->is_nmethod() ) {
-          nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
-        }
-        break;
-      }
-    }
-  }
-}
-
-// This is a private interface with the sweeper.
-void nmethod::mark_as_seen_on_stack() {
-  assert(is_not_entrant(), "must be a non-entrant method");
-  // Set the traversal mark to ensure that the sweeper does 2
-  // cleaning passes before moving to zombie.
-  set_stack_traversal_mark(NMethodSweeper::traversal_count());
-}
-
-// Tell if a non-entrant method can be converted to a zombie (i.e.,
-// there are no activations on the stack, not in use by the VM,
-// and not in use by the ServiceThread)
-bool nmethod::can_not_entrant_be_converted() {
-  assert(is_not_entrant(), "must be a non-entrant method");
-
-  // Since the nmethod sweeper only does a partial sweep the sweeper's traversal
-  // count can be greater than the stack traversal count before it hits the
-  // nmethod for the second time.
-  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
-         !is_locked_by_vm();
-}
-
-void nmethod::inc_decompile_count() {
-  if (!is_compiled_by_c2() && !is_compiled_by_graal()) return;
-  // Could be gated by ProfileTraps, but do not bother...
-  Method* m = method();
-  if (m == NULL) return;
-  MethodData* mdo = m->method_data();
-  if (mdo == NULL) return;
-  // There is a benign race here.  See comments in methodData.hpp.
-  mdo->inc_decompile_count();
-}
-
-void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
-
-  post_compiled_method_unload();
-
-  // Since this nmethod is being unloaded, make sure that dependencies
-  // recorded in instanceKlasses get flushed and pass non-NULL closure to
-  // indicate that this work is being done during a GC.
-  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
-  assert(is_alive != NULL, "Should be non-NULL");
-  // A non-NULL is_alive closure indicates that this is being called during GC.
-  flush_dependencies(is_alive);
-
-  // Break cycle between nmethod & method
-  if (TraceClassUnloading && WizardMode) {
-    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
-                  " unloadable], Method*(" INTPTR_FORMAT
-                  "), cause(" INTPTR_FORMAT ")",
-                  this, (address)_method, (address)cause);
-    if (!Universe::heap()->is_gc_active())
-      cause->klass()->print();
-  }
-  // Unlink the osr method, so we do not look this up again
-  if (is_osr_method()) {
-    invalidate_osr_method();
-  }
-  // If _method is already NULL the Method* is about to be unloaded,
-  // so we don't have to break the cycle. Note that it is possible to
-  // have the Method* live here, in case we unload the nmethod because
-  // it is pointing to some oop (other than the Method*) being unloaded.
-  if (_method != NULL) {
-    // OSR methods point to the Method*, but the Method* does not
-    // point back!
-    if (_method->code() == this) {
-      _method->clear_code();  // Break a cycle
-    }
-    _method = NULL;  // Clear the method of this dead nmethod
-  }
-
-#ifdef GRAAL
-  // The method can only be unloaded after the pointer to the installed code
-  // Java wrapper is no longer alive. Here we need to clear out this weak
-  // reference to the dead object.
-  if (_graal_installed_code != NULL) {
-    _graal_installed_code = NULL;
-  }
-#endif
-
-  // Make the class unloaded - i.e., change state and notify sweeper
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  if (is_in_use()) {
-    // Transitioning directly from live to unloaded -- so
-    // we need to force a cache clean-up; remember this
-    // for later on.
-    CodeCache::set_needs_cache_clean(true);
-  }
-  _state = unloaded;
-
-  // Log the unloading.
-  log_state_change();
-
-  // The Method* is gone at this point
-  assert(_method == NULL, "Tautology");
-
-  set_osr_link(NULL);
-  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
-  NMethodSweeper::notify(this);
-}
-
-void nmethod::invalidate_osr_method() {
-  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
-  // Remove from list of active nmethods
-  if (method() != NULL)
-    method()->method_holder()->remove_osr_nmethod(this);
-  // Set entry as invalid
-  _entry_bci = InvalidOSREntryBci;
-}
-
-void nmethod::log_state_change() const {
-  if (LogCompilation) {
-    if (xtty != NULL) {
-      ttyLocker ttyl;  // keep the following output all in one block
-      if (_state == unloaded) {
-        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
-                         os::current_thread_id());
-      } else {
-        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
-                         os::current_thread_id(),
-                         (_state == zombie ? " zombie='1'" : ""));
-      }
-      log_identity(xtty);
-      xtty->stamp();
-      xtty->end_elem();
-    }
-  }
-  if (PrintCompilation && _state != unloaded) {
-    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
-  }
-}
-
-// Common functionality for both make_not_entrant and make_zombie
-bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
-  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
-  assert(!is_zombie(), "should not already be a zombie");
-
-  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
-  nmethodLocker nml(this);
-  methodHandle the_method(method());
-  No_Safepoint_Verifier nsv;
-
-  {
-    // invalidate osr nmethod before acquiring the patching lock since
-    // they both acquire leaf locks and we don't want a deadlock.
-    // This logic is equivalent to the logic below for patching the
-    // verified entry point of regular methods.
-    if (is_osr_method()) {
-      // this effectively makes the osr nmethod not entrant
-      invalidate_osr_method();
-    }
-
-    // Enter critical section.  Does not block for safepoint.
-    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
-
-    if (_state == state) {
-      // another thread already performed this transition so nothing
-      // to do, but return false to indicate this.
-      return false;
-    }
-
-    // The caller can be calling the method statically or through an inline
-    // cache call.
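// [Editorial note, not part of this changeset:] patching the verified entry
// point below with a jump to the handle_wrong_method stub reroutes every
// future caller, however it reached this code, through the runtime, which
// can then resolve the call again to a correct implementation.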
-    if (!is_osr_method() && !is_not_entrant()) {
-      address stub = SharedRuntime::get_handle_wrong_method_stub();
-#ifdef GRAAL
-      if (_graal_installed_code != NULL && !HotSpotInstalledCode::isDefault(_graal_installed_code)) {
-        // This was manually installed machine code. Patch entry with stub that throws an exception.
-        stub = SharedRuntime::get_deoptimized_installed_code_stub();
-      }
-#endif
-      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), stub);
-    }
-
-    if (is_in_use()) {
-      // It's a true state change, so mark the method as decompiled.
-      // Do it only for transition from alive.
-      inc_decompile_count();
-    }
-
-    // Change state
-    _state = state;
-
-    // Log the transition once
-    log_state_change();
-
-    // Remove nmethod from method.
-    // We need to check if both the _code and _from_compiled_code_entry_point
-    // refer to this nmethod because there is a race in setting these two fields
-    // in Method* as seen in bugid 4947125.
-    // If the vep() points to the zombie nmethod, the memory for the nmethod
-    // could be flushed and the compiler and vtable stubs could still call
-    // through it.
-    if (method() != NULL && (method()->code() == this ||
-                             method()->from_compiled_entry() == verified_entry_point())) {
-      HandleMark hm;
-      method()->clear_code();
-    }
-
-    if (state == not_entrant) {
-      mark_as_seen_on_stack();
-    }
-
-  } // leave critical region under Patching_lock
-
-  // When the nmethod becomes zombie it is no longer alive so the
-  // dependencies must be flushed.  nmethods in the not_entrant
-  // state will be flushed later when the transition to zombie
-  // happens or they get unloaded.
-  if (state == zombie) {
-    {
-      // Flushing dependencies must be done before any possible
-      // safepoint can sneak in, otherwise the oops used by the
-      // dependency logic could have become stale.
-      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      flush_dependencies(NULL);
-    }
-
-    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
-    // event and it hasn't already been reported for this nmethod then
-    // report it now. (The event may have been reported earlier if the GC
-    // marked it for unloading.) JvmtiDeferredEventQueue support means
-    // we no longer go to a safepoint here.
-    post_compiled_method_unload();
-
-#ifdef ASSERT
-    // It's no longer safe to access the oops section since zombie
-    // nmethods aren't scanned for GC.
-    _oops_are_stale = true;
-#endif
-  } else {
-    assert(state == not_entrant, "other cases may need to be handled differently");
-  }
-
-  if (TraceCreateZombies) {
-    ResourceMark m;
-    tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", this, this->method()->name_and_sig_as_C_string(), (state == not_entrant) ? "not entrant" : "zombie");
-  }
-
-  // Make sweeper aware that there is a zombie method that needs to be removed
-  NMethodSweeper::notify(this);
-
-  return true;
-}
-
-void nmethod::flush() {
-  // Note that there are no valid oops in the nmethod anymore.
-  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
-  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
-
-  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  assert_locked_or_safepoint(CodeCache_lock);
-
-  // completely deallocate this method
-  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
-  if (PrintMethodFlushing) {
-    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
-                  _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
-  }
-
-  // We need to deallocate any ExceptionCache data.
-  // Note that we do not need to grab the nmethod lock for this, it
-  // better be thread safe if we're disposing of it!
-  ExceptionCache* ec = exception_cache();
-  set_exception_cache(NULL);
-  while(ec != NULL) {
-    ExceptionCache* next = ec->next();
-    delete ec;
-    ec = next;
-  }
-
-  if (on_scavenge_root_list()) {
-    CodeCache::drop_scavenge_root_nmethod(this);
-  }
-
-  if (is_speculatively_disconnected()) {
-    CodeCache::remove_saved_code(this);
-  }
-
-#ifdef SHARK
-  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
-#endif // SHARK
-
-  ((CodeBlob*)(this))->flush();
-
-  CodeCache::free(this);
-}
-
-
-//
-// Notify all classes this nmethod is dependent on that it is no
-// longer dependent. This should only be called in two situations.
-// First, when an nmethod transitions to a zombie all dependents need
-// to be cleared. Since zombification happens at a safepoint there are no
-// synchronization issues. The second place is a little more tricky.
-// During phase 1 of mark sweep class unloading may happen and as a
-// result some nmethods may get unloaded. In this case the flushing
-// of dependencies must happen during phase 1 since after GC any
-// dependencies in the unloaded nmethod won't be updated, so
-// traversing the dependency information is unsafe. In that case this
-// function is called with a non-NULL argument and this function only
-// notifies instanceKlasses that are reachable
-
-void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
-         "is_alive is non-NULL if and only if we are called during GC");
-  if (!has_flushed_dependencies()) {
-    set_has_flushed_dependencies();
-    for (Dependencies::DepStream deps(this); deps.next(); ) {
-      Klass* klass = deps.context_type();
-      if (klass == NULL) continue;  // ignore things like evol_method
-
-      // During GC the is_alive closure is non-NULL, and is used to
-      // determine liveness of dependees that need to be updated.
-      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
-        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
-      }
-    }
-  }
-}
-
-
-// If this oop is not live, the nmethod can be unloaded.
-bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
-  assert(root != NULL, "just checking");
-  oop obj = *root;
-  if (obj == NULL || is_alive->do_object_b(obj)) {
-    return false;
-  }
-
-  // If ScavengeRootsInCode is true, an nmethod might be unloaded
-  // simply because one of its constant oops has gone dead.
-  // No actual classes need to be unloaded in order for this to occur.
-  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
-  make_unloaded(is_alive, obj);
-  return true;
-}
-
-// ------------------------------------------------------------------
-// post_compiled_method_load_event
-// new method for install_code() path
-// Transfer information from compilation to jvmti
-void nmethod::post_compiled_method_load_event() {
-
-  Method* moop = method();
-#ifndef USDT2
-  HS_DTRACE_PROBE8(hotspot, compiled__method__load,
-      moop->klass_name()->bytes(),
-      moop->klass_name()->utf8_length(),
-      moop->name()->bytes(),
-      moop->name()->utf8_length(),
-      moop->signature()->bytes(),
-      moop->signature()->utf8_length(),
-      insts_begin(), insts_size());
-#else /* USDT2 */
-  HOTSPOT_COMPILED_METHOD_LOAD(
-      (char *) moop->klass_name()->bytes(),
-      moop->klass_name()->utf8_length(),
-      (char *) moop->name()->bytes(),
-      moop->name()->utf8_length(),
-      (char *) moop->signature()->bytes(),
-      moop->signature()->utf8_length(),
-      insts_begin(), insts_size());
-#endif /* USDT2 */
-
-  if (JvmtiExport::should_post_compiled_method_load() ||
-      JvmtiExport::should_post_compiled_method_unload()) {
-    get_and_cache_jmethod_id();
-  }
-
-  if (JvmtiExport::should_post_compiled_method_load()) {
-    // Let the Service thread (which is a real Java thread) post the event
-    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
-    JvmtiDeferredEventQueue::enqueue(
-      JvmtiDeferredEvent::compiled_method_load_event(this));
-  }
-}
-
-jmethodID nmethod::get_and_cache_jmethod_id() {
-  if (_jmethod_id == NULL) {
-    // Cache the jmethod_id since it can no longer be looked up once the
-    // method itself has been marked for unloading.
-    _jmethod_id = method()->jmethod_id();
-  }
-  return _jmethod_id;
-}
-
-void nmethod::post_compiled_method_unload() {
-  if (unload_reported()) {
-    // During unloading we transition to unloaded and then to zombie
-    // and the unloading is reported during the first transition.
-    return;
-  }
-
-  assert(_method != NULL && !is_unloaded(), "just checking");
-  DTRACE_METHOD_UNLOAD_PROBE(method());
-
-  // If a JVMTI agent has enabled the CompiledMethodUnload event then
-  // post the event. Sometime later this nmethod will be made a zombie
-  // by the sweeper but the Method* will not be valid at that point.
-  // If the _jmethod_id is null then no load event was ever requested
-  // so don't bother posting the unload.  The main reason for this is
-  // that the jmethodID is a weak reference to the Method* so if
-  // it's being unloaded there's no way to look it up since the weak
-  // ref will have been cleared.
-  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
-    assert(!unload_reported(), "already unloaded");
-    JvmtiDeferredEvent event =
-      JvmtiDeferredEvent::compiled_method_unload_event(this,
-          _jmethod_id, insts_begin());
-    if (SafepointSynchronize::is_at_safepoint()) {
-      // Don't want to take the queueing lock. Add it as pending and
-      // it will get enqueued later.
-      JvmtiDeferredEventQueue::add_pending_event(event);
-    } else {
-      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
-      JvmtiDeferredEventQueue::enqueue(event);
-    }
-  }
-
-  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
-  // any time. As the nmethod is being unloaded now we mark it as having
-  // the unload event reported - this will ensure that we don't
-  // attempt to report the event in the unlikely scenario where the
-  // event is enabled at the time the nmethod is made a zombie.
-  set_unload_reported();
-}
-
-// This is called at the end of the strong tracing/marking phase of a
-// GC to unload an nmethod if it contains otherwise unreachable
-// oops.
-
-void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
-  // Make sure the oops are ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  // The RedefineClasses() API can cause the class unloading invariant
-  // to no longer be true. See jvmtiExport.hpp for details.
-  // Also, leave a debugging breadcrumb in local flag.
-  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
-  if (a_class_was_redefined) {
-    // This set of the unloading_occurred flag is done before the
-    // call to post_compiled_method_unload() so that the unloading
-    // of this nmethod is reported.
-    unloading_occurred = true;
-  }
-
-#ifdef GRAAL
-  // Follow Graal method
-  if (_graal_installed_code != NULL) {
-    if (HotSpotInstalledCode::isDefault(_graal_installed_code)) {
-      if (!is_alive->do_object_b(_graal_installed_code)) {
-        _graal_installed_code = NULL;
-      }
-    } else {
-      if (can_unload(is_alive, (oop*)&_graal_installed_code, unloading_occurred)) {
-        return;
-      }
-    }
-  }
-#endif
-
-  // Exception cache
-  ExceptionCache* ec = exception_cache();
-  while (ec != NULL) {
-    Klass* ex_klass = ec->exception_type();
-    ExceptionCache* next_ec = ec->next();
-    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
-      remove_from_exception_cache(ec);
-    }
-    ec = next_ec;
-  }
-
-  // If class unloading occurred we first iterate over all inline caches and
-  // clear ICs where the cached oop is referring to an unloaded klass or method.
-  // The remaining live cached oops will be traversed in the relocInfo::oop_type
-  // iteration below.
-  if (unloading_occurred) {
-    RelocIterator iter(this, low_boundary);
-    while(iter.next()) {
-      if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
-        if (ic->is_icholder_call()) {
-          // The only exception is compiledICHolder oops which may
-          // yet be marked below. (We check this further below).
-          CompiledICHolder* cichk_oop = ic->cached_icholder();
-          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
-              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
-            continue;
-          }
-        } else {
-          Metadata* ic_oop = ic->cached_metadata();
-          if (ic_oop != NULL) {
-            if (ic_oop->is_klass()) {
-              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else if (ic_oop->is_method()) {
-              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else {
-              ShouldNotReachHere();
-            }
-          }
-        }
-        ic->set_to_clean();
-      }
-    }
-  }
-
-  // Compiled code
-  {
-    RelocIterator iter(this, low_boundary);
-    while (iter.next()) {
-      if (iter.type() == relocInfo::oop_type) {
-        oop_Relocation* r = iter.oop_reloc();
-        // In this loop, we must only traverse those oops directly embedded in
-        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
-        assert(1 == (r->oop_is_immediate()) +
-               (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
-               "oop must be found in exactly one place");
-        if (r->oop_is_immediate() && r->oop_value() != NULL) {
-          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
-            return;
-          }
-        }
-      }
-    }
-  }
-
-
-  // Scopes
-  for (oop* p = oops_begin(); p < oops_end(); p++) {
-    if (*p == Universe::non_oop_word()) continue;  // skip non-oops
-    if (can_unload(is_alive, p, unloading_occurred)) {
-      return;
-    }
-  }
-
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary, is_alive);
-}
-
-#ifdef ASSERT
-
-class CheckClass : AllStatic {
-  static BoolObjectClosure* _is_alive;
-
-  // Check class_loader is alive for this bit of metadata.
-  static void check_class(Metadata* md) {
-    Klass* klass = NULL;
-    if (md->is_klass()) {
-      klass = ((Klass*)md);
-    } else if (md->is_method()) {
-      klass = ((Method*)md)->method_holder();
-    } else if (md->is_methodData()) {
-      klass = ((MethodData*)md)->method()->method_holder();
-    } else {
-      md->print();
-      ShouldNotReachHere();
-    }
-    assert(klass->is_loader_alive(_is_alive), "must be alive");
-  }
- public:
-  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
-    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
-    _is_alive = is_alive;
-    nm->metadata_do(check_class);
-  }
-};
-
-// This is called during a safepoint so can use static data
-BoolObjectClosure* CheckClass::_is_alive = NULL;
-#endif // ASSERT
-
-
-// Processing of oop references should have been sufficient to keep
-// all strong references alive.  Any weak references should have been
-// cleared as well.  Visit all the metadata and ensure that it's
-// really alive.
-void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
-#ifdef ASSERT
-  RelocIterator iter(this, low_boundary);
-  while (iter.next()) {
-    // static_stub_Relocations may have dangling references to
-    // Method*s so trim them out here.  Otherwise it looks like
-    // compiled code is maintaining a link to dead metadata.
-    address static_call_addr = NULL;
-    if (iter.type() == relocInfo::opt_virtual_call_type) {
-      CompiledIC* cic = CompiledIC_at(iter.reloc());
-      if (!cic->is_call_to_interpreted()) {
-        static_call_addr = iter.addr();
-      }
-    } else if (iter.type() == relocInfo::static_call_type) {
-      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
-      if (!csc->is_call_to_interpreted()) {
-        static_call_addr = iter.addr();
-      }
-    }
-    if (static_call_addr != NULL) {
-      RelocIterator sciter(this, low_boundary);
-      while (sciter.next()) {
-        if (sciter.type() == relocInfo::static_stub_type &&
-            sciter.static_stub_reloc()->static_call() == static_call_addr) {
-          sciter.static_stub_reloc()->clear_inline_cache();
-        }
-      }
-    }
-  }
-  // Check that the metadata embedded in the nmethod is alive
-  CheckClass::do_check_class(is_alive, this);
-#endif
-}
-
-
-// Iterate over metadata calling this function.   Used by RedefineClasses
-void nmethod::metadata_do(void f(Metadata*)) {
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-  {
-    // Visit all immediate references that are embedded in the instruction stream.
-    RelocIterator iter(this, low_boundary);
-    while (iter.next()) {
-      if (iter.type() == relocInfo::metadata_type ) {
-        metadata_Relocation* r = iter.metadata_reloc();
-        // In this loop, we must only follow those metadatas directly embedded in
-        // the code.  Other metadatas (oop_index>0) are seen as part of
-        // the metadata section below.
-        assert(1 == (r->metadata_is_immediate()) +
-               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
-               "metadata must be found in exactly one place");
-        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
-          Metadata* md = r->metadata_value();
-          f(md);
-        }
-      }
-    }
-  }
-
-  // Visit the metadata section
-  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
-    if (*p == Universe::non_oop_word() || *p == NULL) continue;  // skip non-oops
-    Metadata* md = *p;
-    f(md);
-  }
-  // Finally, call f on the Method* itself, which is not embedded in these other places.
-  if (_method != NULL) f(_method);
-}
-
-
-// This method is called twice during GC -- once while
-// tracing the "active" nmethods on thread stacks during
-// the (strong) marking phase, and then again when walking
-// the code cache contents during the weak roots processing
-// phase. The two uses are distinguished by means of the
-// 'do_strong_roots_only' flag, which is true in the first
-// case. We want to walk the weak roots in the nmethod
-// only in the second case. The weak roots in the nmethod
-// are the oops in the ExceptionCache and the InlineCache
-// oops.
-void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
-  // make sure the oops are ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
-
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-#ifdef GRAAL
-  if (_graal_installed_code != NULL) {
-    f->do_oop((oop*) &_graal_installed_code);
-  }
-  if (_triggered_deoptimizations != NULL) {
-    f->do_oop((oop*) &_triggered_deoptimizations);
-  }
-#endif
-
-  RelocIterator iter(this, low_boundary);
-
-  while (iter.next()) {
-    if (iter.type() == relocInfo::oop_type ) {
-      oop_Relocation* r = iter.oop_reloc();
-      // In this loop, we must only follow those oops directly embedded in
-      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
-      assert(1 == (r->oop_is_immediate()) +
-             (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
-             "oop must be found in exactly one place");
-      if (r->oop_is_immediate() && r->oop_value() != NULL) {
-        f->do_oop(r->oop_addr());
-      }
-    }
-  }
-
-  // Scopes
-  // This includes oop constants not inlined in the code stream.
-  for (oop* p = oops_begin(); p < oops_end(); p++) {
-    if (*p == Universe::non_oop_word()) continue;  // skip non-oops
-    f->do_oop(p);
-  }
-}
-
-#define NMETHOD_SENTINEL ((nmethod*)badAddress)
-
-nmethod* volatile nmethod::_oops_do_mark_nmethods;
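// [Editorial sketch, not part of this changeset.] The claim-then-push pattern
// implemented below by test_set_oops_do_mark(), reduced to std::atomic. A
// thread first CASes a node's link from NULL to a sentinel to claim it, then
// CAS-loops the node onto the global list head. The list is terminated by the
// sentinel rather than by NULL, so a claimed node's link is never NULL. All
// names here are hypothetical.
#include <atomic>

struct Node { std::atomic<Node*> link{nullptr}; };
static Node* const SENTINEL = reinterpret_cast<Node*>(-1);
static std::atomic<Node*> g_list_head{SENTINEL};   // a prologue sets this up

static bool try_claim_and_push(Node* n) {
  Node* expected = nullptr;
  if (!n->link.compare_exchange_strong(expected, SENTINEL)) {
    return false;                      // another thread claimed n first
  }
  Node* head = g_list_head.load();
  do {
    n->link.store(head);               // overwrite sentinel with current head
  } while (!g_list_head.compare_exchange_weak(head, n));
  return true;                         // n is claimed and on the list
}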
-
-// An nmethod is "marked" if its _mark_link is set non-null.
-// Even if it is the end of the linked list, it will have a non-null link value,
-// as long as it is on the list.
-// This code must be MP safe, because it is used from parallel GC passes.
-bool nmethod::test_set_oops_do_mark() {
-  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
-  nmethod* observed_mark_link = _oops_do_mark_link;
-  if (observed_mark_link == NULL) {
-    // Claim this nmethod for this thread to mark.
-    observed_mark_link = (nmethod*)
-      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
-    if (observed_mark_link == NULL) {
-
-      // Atomically append this nmethod (now claimed) to the head of the list:
-      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
-      for (;;) {
-        nmethod* required_mark_nmethods = observed_mark_nmethods;
-        _oops_do_mark_link = required_mark_nmethods;
-        observed_mark_nmethods = (nmethod*)
-          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
-        if (observed_mark_nmethods == required_mark_nmethods)
-          break;
-      }
-      // Mark was clear when we first saw this guy.
-      NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark"));
-      return false;
-    }
-  }
-  // On fall through, another racing thread marked this nmethod before we did.
-  return true;
-}
-
-void nmethod::oops_do_marking_prologue() {
-  NOT_PRODUCT(if (TraceScavenge) tty->print_cr("[oops_do_marking_prologue"));
-  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
-  // We use cmpxchg_ptr instead of regular assignment here because the user
-  // may fork a bunch of threads, and we need them all to see the same state.
-  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
-  guarantee(observed == NULL, "no races in this sequential code");
-}
-
-void nmethod::oops_do_marking_epilogue() {
-  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
-  nmethod* cur = _oops_do_mark_nmethods;
-  while (cur != NMETHOD_SENTINEL) {
-    assert(cur != NULL, "not NULL-terminated");
-    nmethod* next = cur->_oops_do_mark_link;
-    cur->_oops_do_mark_link = NULL;
-    cur->fix_oop_relocations();
-    NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
-    cur = next;
-  }
-  void* required = _oops_do_mark_nmethods;
-  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
-  guarantee(observed == required, "no races in this sequential code");
-  NOT_PRODUCT(if (TraceScavenge) tty->print_cr("oops_do_marking_epilogue]"));
-}
-
-class DetectScavengeRoot: public OopClosure {
-  bool _detected_scavenge_root;
-public:
-  DetectScavengeRoot() : _detected_scavenge_root(false)
-  { NOT_PRODUCT(_print_nm = NULL); }
-  bool detected_scavenge_root() { return _detected_scavenge_root; }
-  virtual void do_oop(oop* p) {
-    if ((*p) != NULL && (*p)->is_scavengable()) {
-      NOT_PRODUCT(maybe_print(p));
-      _detected_scavenge_root = true;
-    }
-  }
-  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-
-#ifndef PRODUCT
-  nmethod* _print_nm;
-  void maybe_print(oop* p) {
-    if (_print_nm == NULL) return;
-    if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
-                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
-                  (intptr_t)(*p), (intptr_t)p);
-    (*p)->print();
-  }
-#endif //PRODUCT
-};
-
-bool nmethod::detect_scavenge_root_oops() {
-  DetectScavengeRoot detect_scavenge_root;
-  NOT_PRODUCT(if (TraceScavenge) detect_scavenge_root._print_nm = this);
-  oops_do(&detect_scavenge_root);
-  return detect_scavenge_root.detected_scavenge_root();
-}
-
-// Method that knows how to preserve outgoing arguments at call. This method must be
-// called with a frame corresponding to a Java invoke
-void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
-#ifndef SHARK
-  if (!method()->is_native()) {
-    SimpleScopeDesc ssd(this, fr.pc());
-    Bytecode_invoke call(ssd.method(), ssd.bci());
-    // compiled invokedynamic call sites have an implicit receiver at
-    // resolution time, so make sure it gets GC'ed.
-    bool has_receiver = !call.is_invokestatic();
-    Symbol* signature = call.signature();
-    fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
-  }
-#endif // !SHARK
-}
-
-
-oop nmethod::embeddedOop_at(u_char* p) {
-  RelocIterator iter(this, p, p + 1);
-  while (iter.next())
-    if (iter.type() == relocInfo::oop_type) {
-      return iter.oop_reloc()->oop_value();
-    }
-  return NULL;
-}
-
-
-inline bool includes(void* p, void* from, void* to) {
-  return from <= p && p < to;
-}
-
-
-void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
-  assert(count >= 2, "must be sentinel values, at least");
-
-#ifdef ASSERT
-  // must be sorted and unique; we do a binary search in find_pc_desc()
-  int prev_offset = pcs[0].pc_offset();
-  assert(prev_offset == PcDesc::lower_offset_limit,
-         "must start with a sentinel");
-  for (int i = 1; i < count; i++) {
-    int this_offset = pcs[i].pc_offset();
-    assert(this_offset > prev_offset, "offsets must be sorted");
-    prev_offset = this_offset;
-  }
-  assert(prev_offset == PcDesc::upper_offset_limit,
-         "must end with a sentinel");
-#endif //ASSERT
-
-  // Search for MethodHandle invokes and tag the nmethod.
-  for (int i = 0; i < count; i++) {
-    if (pcs[i].is_method_handle_invoke()) {
-      set_has_method_handle_invokes(true);
-      break;
-    }
-  }
-  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
-
-  int size = count * sizeof(PcDesc);
-  assert(scopes_pcs_size() >= size, "oob");
-  memcpy(scopes_pcs_begin(), pcs, size);
-
-  // Adjust the final sentinel downward.
-  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
-  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
-  last_pc->set_pc_offset(content_size() + 1);
-  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
-    // Fill any rounding gaps with copies of the last record.
-    last_pc[1] = last_pc[0];
-  }
-  // The following assert could fail if sizeof(PcDesc) is not
-  // an integral multiple of oopSize (the rounding term).
-  // If it fails, change the logic to always allocate a multiple
-  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
-  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
-}
-
-void nmethod::copy_scopes_data(u_char* buffer, int size) {
-  assert(scopes_data_size() >= size, "oob");
-  memcpy(scopes_data_begin(), buffer, size);
-}
-
-
-#ifdef ASSERT
-static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
-  PcDesc* lower = nm->scopes_pcs_begin();
-  PcDesc* upper = nm->scopes_pcs_end();
-  lower += 1; // exclude initial sentinel
-  PcDesc* res = NULL;
-  for (PcDesc* p = lower; p < upper; p++) {
-    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
-    if (match_desc(p, pc_offset, approximate)) {
-      if (res == NULL)
-        res = p;
-      else
-        res = (PcDesc*) badAddress;
-    }
-  }
-  return res;
-}
-#endif
-
-
-// Finds a PcDesc with real-pc equal to "pc"
-PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
-  address base_address = code_begin();
-  if ((pc < base_address) ||
-      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
-    return NULL;  // PC is wildly out of range
-  }
-  int pc_offset = (int) (pc - base_address);
-
-  // Check the PcDesc cache if it contains the desired PcDesc
-  // (This has an almost 100% hit rate.)
-  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
-  if (res != NULL) {
-    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
-    return res;
-  }
-
-  // Fallback algorithm: quasi-linear search for the PcDesc
-  // Find the last pc_offset less than the given offset.
-  // The successor must be the required match, if there is a match at all.
-  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
-  PcDesc* lower = scopes_pcs_begin();
-  PcDesc* upper = scopes_pcs_end();
-  upper -= 1; // exclude final sentinel
-  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
-
-#define assert_LU_OK \
-  /* invariant on lower..upper during the following search: */ \
-  assert(lower->pc_offset() <  pc_offset, "sanity"); \
-  assert(upper->pc_offset() >= pc_offset, "sanity")
-  assert_LU_OK;
-
-  // Use the last successful return as a split point.
-  PcDesc* mid = _pc_desc_cache.last_pc_desc();
-  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
-  if (mid->pc_offset() < pc_offset) {
-    lower = mid;
-  } else {
-    upper = mid;
-  }
-
-  // Take giant steps at first (4096, then 256, then 16, then 1)
-  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
-  const int RADIX = (1 << LOG2_RADIX);
-  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
-    while ((mid = lower + step) < upper) {
-      assert_LU_OK;
-      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
-      if (mid->pc_offset() < pc_offset) {
-        lower = mid;
-      } else {
-        upper = mid;
-        break;
-      }
-    }
-    assert_LU_OK;
-  }
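// [Editorial note, not part of this changeset:] after the stride loop above,
// lower and upper still bracket pc_offset but are at most RADIX entries
// apart, so the linear pass below terminates within ~16 iterations.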
-
-  // Sneak up on the value with a linear search of length ~16.
-  while (true) {
-    assert_LU_OK;
-    mid = lower + 1;
-    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
-    if (mid->pc_offset() < pc_offset) {
-      lower = mid;
-    } else {
-      upper = mid;
-      break;
-    }
-  }
-#undef assert_LU_OK
-
-  if (match_desc(upper, pc_offset, approximate)) {
-    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
-    _pc_desc_cache.add_pc_desc(upper);
-    return upper;
-  } else {
-    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
-    return NULL;
-  }
-}
-
-
-bool nmethod::check_all_dependencies() {
-  bool found_check = false;
-  // wholesale check of all dependencies
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.check_dependency() != NULL) {
-      found_check = true;
-      NOT_DEBUG(break);
-    }
-  }
-  return found_check;  // tell caller if we found anything
-}
-
-bool nmethod::check_dependency_on(DepChange& changes) {
-  // What has happened:
-  // 1) a new class dependee has been added
-  // 2) dependee and all its super classes have been marked
-  bool found_check = false;  // set true if we are upset
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    // Evaluate only relevant dependencies.
-    if (deps.spot_check_dependency_at(changes) != NULL) {
-      found_check = true;
-      NOT_DEBUG(break);
-    }
-  }
-  return found_check;
-}
-
-bool nmethod::is_evol_dependent_on(Klass* dependee) {
-  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
-  Array<Method*>* dependee_methods = dependee_ik->methods();
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.type() == Dependencies::evol_method) {
-      Method* method = deps.method_argument(0);
-      for (int j = 0; j < dependee_methods->length(); j++) {
-        if (dependee_methods->at(j) == method) {
-          // RC_TRACE macro has an embedded ResourceMark
-          RC_TRACE(0x01000000,
-            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
-            _method->method_holder()->external_name(),
-            _method->name()->as_C_string(),
-            _method->signature()->as_C_string(), compile_id(),
-            method->method_holder()->external_name(),
-            method->name()->as_C_string(),
-            method->signature()->as_C_string()));
-          if (TraceDependencies || LogCompilation)
-            deps.log_dependency(dependee);
-          return true;
-        }
-      }
-    }
-  }
-  return false;
-}
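// [Editorial sketch, not part of this changeset.] The bracketing search from
// find_pc_desc_internal() above, restated over a plain sorted int array. It
// assumes the sentinels the real code guarantees: a[0] < key and
// a[n-1] >= key. Each pass strides in steps of 4096, 256, then 16, shrinking
// the bracket; a final linear pass returns the first element >= key.
static int find_upper(const int* a, int n, int key) {
  int lower = 0;
  int upper = n - 1;
  const int LOG2_RADIX = 4;
  for (int step = (1 << (LOG2_RADIX * 3)); step > 1; step >>= LOG2_RADIX) {
    int mid;
    while ((mid = lower + step) < upper) {
      if (a[mid] < key) {
        lower = mid;                   // still below key; keep striding
      } else {
        upper = mid;                   // overshot; shrink and reduce stride
        break;
      }
    }
  }
  while (true) {                       // sneak up linearly (at most ~16 steps)
    int mid = lower + 1;
    if (a[mid] < key) {
      lower = mid;
    } else {
      return mid;                      // first index with a[mid] >= key
    }
  }
}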
-
-// Called from mark_for_deoptimization, when dependee is invalidated.
-bool nmethod::is_dependent_on_method(Method* dependee) {
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.type() != Dependencies::evol_method)
-      continue;
-    Method* method = deps.method_argument(0);
-    if (method == dependee) return true;
-  }
-  return false;
-}
-
-
-bool nmethod::is_patchable_at(address instr_addr) {
-  assert(insts_contains(instr_addr), "wrong nmethod used");
-  if (is_zombie()) {
-    // a zombie may never be patched
-    return false;
-  }
-  return true;
-}
-
-
-address nmethod::continuation_for_implicit_exception(address pc) {
-  // Exception happened outside inline-cache check code => we are inside
-  // an active nmethod => use cpc to determine a return address
-  int exception_offset = pc - code_begin();
-  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
-#ifdef ASSERT
-  if (cont_offset == 0) {
-    Thread* thread = ThreadLocalStorage::get_thread_slow();
-    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
-    HandleMark hm(thread);
-    ResourceMark rm(thread);
-    CodeBlob* cb = CodeCache::find_blob(pc);
-    assert(cb != NULL && cb == this, "");
-    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
-    print();
-    method()->print_codes();
-    print_code();
-    print_pcs();
-  }
-#endif
-  if (cont_offset == 0) {
-    // Let the normal error handling report the exception
-    return NULL;
-  }
-  return code_begin() + cont_offset;
-}
-
-
-
-void nmethod_init() {
-  // make sure you didn't forget to adjust the filler fields
-  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
-}
-
-
-//-------------------------------------------------------------------------------------------
-
-
-// QQQ might we make this work from a frame??
-nmethodLocker::nmethodLocker(address pc) {
-  CodeBlob* cb = CodeCache::find_blob(pc);
-  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
-  _nm = (nmethod*)cb;
-  lock_nmethod(_nm);
-}
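// [Editorial sketch, not part of this changeset.] The counting lock that
// nmethodLocker implements, reduced to a standalone RAII guard over an
// atomic counter: while the count is nonzero the guarded object must not be
// reclaimed. Names here are hypothetical.
#include <atomic>
#include <cassert>

struct Guarded { std::atomic<int> lock_count{0}; };

class ScopedGuard {
  Guarded* _g;
 public:
  explicit ScopedGuard(Guarded* g) : _g(g) {
    _g->lock_count.fetch_add(1);       // pin: a reclaimer must observe > 0
  }
  ~ScopedGuard() {
    int old = _g->lock_count.fetch_sub(1);
    assert(old >= 1 && "unmatched lock/unlock");
  }
};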
-void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) { - if (nm == NULL) return; - Atomic::inc(&nm->_lock_count); - guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method"); -} - -void nmethodLocker::unlock_nmethod(nmethod* nm) { - if (nm == NULL) return; - Atomic::dec(&nm->_lock_count); - guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock"); -} - - -// ----------------------------------------------------------------------------- -// nmethod::get_deopt_original_pc -// -// Return the original PC for the given PC if: -// (a) the given PC belongs to a nmethod and -// (b) it is a deopt PC -address nmethod::get_deopt_original_pc(const frame* fr) { - if (fr->cb() == NULL) return NULL; - - nmethod* nm = fr->cb()->as_nmethod_or_null(); - if (nm != NULL && nm->is_deopt_pc(fr->pc())) - return nm->get_original_pc(fr); - - return NULL; -} - - -// ----------------------------------------------------------------------------- -// MethodHandle - -bool nmethod::is_method_handle_return(address return_pc) { - if (!has_method_handle_invokes()) return false; - PcDesc* pd = pc_desc_at(return_pc); - if (pd == NULL) - return false; - return pd->is_method_handle_invoke(); -} - - -// ----------------------------------------------------------------------------- -// Verification - -class VerifyOopsClosure: public OopClosure { - nmethod* _nm; - bool _ok; -public: - VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { } - bool ok() { return _ok; } - virtual void do_oop(oop* p) { - if ((*p) == NULL || (*p)->is_oop()) return; - if (_ok) { - _nm->print_nmethod(true); - _ok = false; - } - tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)", - (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); - } - virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } -}; - -void nmethod::verify() { - - // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant - // seems odd. - - if( is_zombie() || is_not_entrant() ) - return; - - // Make sure all the entry points are correctly aligned for patching. - NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point()); - - // assert(method()->is_oop(), "must be valid"); - - ResourceMark rm; - - if (!CodeCache::contains(this)) { - fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this)); - } - - if(is_native_method() ) - return; - - nmethod* nm = CodeCache::find_nmethod(verified_entry_point()); - if (nm != this) { - fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", - this)); - } - - for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { - if (! p->verify(this)) { - tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this); - } - } - - VerifyOopsClosure voc(this); - oops_do(&voc); - assert(voc.ok(), "embedded oops must be OK"); - verify_scavenge_root_oops(); - - verify_scopes(); -} - - -void nmethod::verify_interrupt_point(address call_site) { - // This code does not work in release mode since - // owns_lock only is available in debug mode. 
- CompiledIC* ic = NULL; - Thread *cur = Thread::current(); - if (CompiledIC_lock->owner() == cur || - ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) && - SafepointSynchronize::is_at_safepoint())) { - ic = CompiledIC_at(this, call_site); - CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); - } else { - MutexLocker ml_verify (CompiledIC_lock); - ic = CompiledIC_at(this, call_site); - } - - PcDesc* pd = pc_desc_at(ic->end_of_call()); - assert(pd != NULL, "PcDesc must exist"); - for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), - pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(), - pd->return_oop()); - !sd->is_top(); sd = sd->sender()) { - sd->verify(); - } -} - -void nmethod::verify_scopes() { - if( !method() ) return; // Runtime stubs have no scope - if (method()->is_native()) return; // Ignore stub methods. - // iterate through all interrupt point - // and verify the debug information is valid. - RelocIterator iter((nmethod*)this); - while (iter.next()) { - address stub = NULL; - switch (iter.type()) { - case relocInfo::virtual_call_type: - verify_interrupt_point(iter.addr()); - break; - case relocInfo::opt_virtual_call_type: - stub = iter.opt_virtual_call_reloc()->static_stub(); - verify_interrupt_point(iter.addr()); - break; - case relocInfo::static_call_type: - stub = iter.static_call_reloc()->static_stub(); - //verify_interrupt_point(iter.addr()); - break; - case relocInfo::runtime_call_type: - address destination = iter.reloc()->value(); - // Right now there is no way to find out which entries support - // an interrupt point. It would be nice if we had this - // information in a table. - break; - } - assert(stub == NULL || stub_contains(stub), "static call stub outside stub section"); - } -} - - -// ----------------------------------------------------------------------------- -// Non-product code -#ifndef PRODUCT - -class DebugScavengeRoot: public OopClosure { - nmethod* _nm; - bool _ok; -public: - DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { } - bool ok() { return _ok; } - virtual void do_oop(oop* p) { - if ((*p) == NULL || !(*p)->is_scavengable()) return; - if (_ok) { - _nm->print_nmethod(true); - _ok = false; - } - tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)", - (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); - (*p)->print(); - } - virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } -}; - -void nmethod::verify_scavenge_root_oops() { - if (!on_scavenge_root_list()) { - // Actually look inside, to verify the claim that it's clean. 
- DebugScavengeRoot debug_scavenge_root(this); - oops_do(&debug_scavenge_root); - if (!debug_scavenge_root.ok()) - fatal("found an unadvertised bad scavengable oop in the code cache"); - } - assert(scavenge_root_not_marked(), ""); -} - -#endif // PRODUCT - -// Printing operations - -void nmethod::print() const { - ResourceMark rm; - ttyLocker ttyl; // keep the following output all in one block - - tty->print("Compiled method "); - - if (is_compiled_by_c1()) { - tty->print("(c1) "); - } else if (is_compiled_by_c2()) { - tty->print("(c2) "); - } else if (is_compiled_by_shark()) { - tty->print("(shark) "); - } else if (is_compiled_by_graal()) { - tty->print("(Graal) "); - } else { - tty->print("(nm) "); - } - - print_on(tty, NULL); - - if (WizardMode) { - tty->print("((nmethod*) "INTPTR_FORMAT ") ", this); - tty->print(" for method " INTPTR_FORMAT , (address)method()); - tty->print(" { "); - if (is_in_use()) tty->print("in_use "); - if (is_not_entrant()) tty->print("not_entrant "); - if (is_zombie()) tty->print("zombie "); - if (is_unloaded()) tty->print("unloaded "); - if (on_scavenge_root_list()) tty->print("scavenge_root "); - tty->print_cr("}:"); - } - if (size () > 0) tty->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - (address)this, - (address)this + size(), - size()); - if (relocation_size () > 0) tty->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - relocation_begin(), - relocation_end(), - relocation_size()); - if (consts_size () > 0) tty->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - consts_begin(), - consts_end(), - consts_size()); - if (insts_size () > 0) tty->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - insts_begin(), - insts_end(), - insts_size()); - if (stub_size () > 0) tty->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - stub_begin(), - stub_end(), - stub_size()); - if (oops_size () > 0) tty->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - oops_begin(), - oops_end(), - oops_size()); - if (metadata_size () > 0) tty->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - metadata_begin(), - metadata_end(), - metadata_size()); - if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - scopes_data_begin(), - scopes_data_end(), - scopes_data_size()); - if (scopes_pcs_size () > 0) tty->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - scopes_pcs_begin(), - scopes_pcs_end(), - scopes_pcs_size()); - if (dependencies_size () > 0) tty->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - dependencies_begin(), - dependencies_end(), - dependencies_size()); - if (handler_table_size() > 0) tty->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - handler_table_begin(), - handler_table_end(), - handler_table_size()); - if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", - nul_chk_table_begin(), - nul_chk_table_end(), - nul_chk_table_size()); -} - -void nmethod::print_code() { - HandleMark hm; - ResourceMark m; - Disassembler::decode(this); -} - - -#ifndef PRODUCT - -void nmethod::print_scopes() { - // Find the first pc desc for all scopes in the code and print it. 
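VerifyOopsClosure and DebugScavengeRoot above share one shape: the nmethod drives the iteration (oops_do) while the closure carries the policy, records a verdict, and prints the offending nmethod only once. A stripped-down sketch of that pattern, with illustrative names rather than HotSpot types:

struct SlotClosure {                          // analogue of OopClosure
  virtual void do_slot(void** p) = 0;
  virtual ~SlotClosure() {}
};

struct Container {                            // analogue of nmethod
  void* slots[4];
  void slots_do(SlotClosure* cl) {            // analogue of oops_do
    for (int i = 0; i < 4; i++) cl->do_slot(&slots[i]);
  }
};

struct FindBad : SlotClosure {                // analogue of VerifyOopsClosure
  bool _ok;
  FindBad() : _ok(true) {}
  bool ok() { return _ok; }
  virtual void do_slot(void** p) {
    if (*p != (void*)0x1) return;             // pretend 0x1 marks a bad slot
    _ok = false;                              // remember the failure, keep scanning
  }
};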
- ResourceMark rm; - for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { - if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null) - continue; - - ScopeDesc* sd = scope_desc_at(p->real_pc(this)); - sd->print_on(tty, p); - } -} - -void nmethod::print_dependencies() { - ResourceMark rm; - ttyLocker ttyl; // keep the following output all in one block - tty->print_cr("Dependencies:"); - for (Dependencies::DepStream deps(this); deps.next(); ) { - deps.print_dependency(); - Klass* ctxk = deps.context_type(); - if (ctxk != NULL) { - if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) { - tty->print_cr(" [nmethod<=klass]%s", ctxk->external_name()); - } - } - deps.log_dependency(); // put it into the xml log also - } -} - - -void nmethod::print_relocations() { - ResourceMark m; // in case methods get printed via the debugger - tty->print_cr("relocations:"); - RelocIterator iter(this); - iter.print(); - if (UseRelocIndex) { - jint* index_end = (jint*)relocation_end() - 1; - jint index_size = *index_end; - jint* index_start = (jint*)( (address)index_end - index_size ); - tty->print_cr(" index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size); - if (index_size > 0) { - jint* ip; - for (ip = index_start; ip+2 <= index_end; ip += 2) - tty->print_cr(" (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT, - ip[0], - ip[1], - header_end()+ip[0], - relocation_begin()-1+ip[1]); - for (; ip < index_end; ip++) - tty->print_cr(" (%d ?)", ip[0]); - tty->print_cr(" @" INTPTR_FORMAT ": index_size=%d", ip, *ip++); - tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip); - } - } -} - - -void nmethod::print_pcs() { - ResourceMark m; // in case methods get printed via debugger - tty->print_cr("pc-bytecode offsets:"); - for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { - p->print(this); - } -} - -#endif // PRODUCT - -const char* nmethod::reloc_string_for(u_char* begin, u_char* end) { - RelocIterator iter(this, begin, end); - bool have_one = false; - while (iter.next()) { - have_one = true; - switch (iter.type()) { - case relocInfo::none: return "no_reloc"; - case relocInfo::oop_type: { - stringStream st; - oop_Relocation* r = iter.oop_reloc(); - oop obj = r->oop_value(); - st.print("oop("); - if (obj == NULL) st.print("NULL"); - else obj->print_value_on(&st); - st.print(")"); - return st.as_string(); - } - case relocInfo::metadata_type: { - stringStream st; - metadata_Relocation* r = iter.metadata_reloc(); - Metadata* obj = r->metadata_value(); - st.print("metadata("); - if (obj == NULL) st.print("NULL"); - else obj->print_value_on(&st); - st.print(")"); - return st.as_string(); - } - case relocInfo::virtual_call_type: return "virtual_call"; - case relocInfo::opt_virtual_call_type: return "optimized virtual_call"; - case relocInfo::static_call_type: return "static_call"; - case relocInfo::static_stub_type: return "static_stub"; - case relocInfo::runtime_call_type: return "runtime_call"; - case relocInfo::external_word_type: return "external_word"; - case relocInfo::internal_word_type: return "internal_word"; - case relocInfo::section_word_type: return "section_word"; - case relocInfo::poll_type: return "poll"; - case relocInfo::poll_return_type: return "poll_return"; - case relocInfo::type_mask: return "type_bit_mask"; - } - } - return have_one ? 
"other" : NULL; -} - -// Return a the last scope in (begin..end] -ScopeDesc* nmethod::scope_desc_in(address begin, address end) { - PcDesc* p = pc_desc_near(begin+1); - if (p != NULL && p->real_pc(this) <= end) { - return new ScopeDesc(this, p->scope_decode_offset(), - p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(), - p->return_oop()); - } - return NULL; -} - -void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const { - if (block_begin == entry_point()) stream->print_cr("[Entry Point]"); - if (block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]"); - if (GRAAL_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin()) stream->print_cr("[Exception Handler]"); - if (block_begin == stub_begin()) stream->print_cr("[Stub Code]"); - if (GRAAL_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin()) stream->print_cr("[Deopt Handler Code]"); - - if (has_method_handle_invokes()) - if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]"); - - if (block_begin == consts_begin()) stream->print_cr("[Constants]"); - - if (block_begin == entry_point()) { - methodHandle m = method(); - if (m.not_null()) { - stream->print(" # "); - m->print_value_on(stream); - stream->cr(); - } - if (m.not_null() && !is_osr_method()) { - ResourceMark rm; - int sizeargs = m->size_of_parameters(); - BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs); - VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs); - { - int sig_index = 0; - if (!m->is_static()) - sig_bt[sig_index++] = T_OBJECT; // 'this' - for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) { - BasicType t = ss.type(); - sig_bt[sig_index++] = t; - if (type2size[t] == 2) { - sig_bt[sig_index++] = T_VOID; - } else { - assert(type2size[t] == 1, "size is 1 or 2"); - } - } - assert(sig_index == sizeargs, ""); - } - const char* spname = "sp"; // make arch-specific? - intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false); - int stack_slot_offset = this->frame_size() * wordSize; - int tab1 = 14, tab2 = 24; - int sig_index = 0; - int arg_index = (m->is_static() ? 0 : -1); - bool did_old_sp = false; - for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) { - bool at_this = (arg_index == -1); - bool at_old_sp = false; - BasicType t = (at_this ? 
T_OBJECT : ss.type()); - assert(t == sig_bt[sig_index], "sigs in sync"); - if (at_this) - stream->print(" # this: "); - else - stream->print(" # parm%d: ", arg_index); - stream->move_to(tab1); - VMReg fst = regs[sig_index].first(); - VMReg snd = regs[sig_index].second(); - if (fst->is_reg()) { - stream->print("%s", fst->name()); - if (snd->is_valid()) { - stream->print(":%s", snd->name()); - } - } else if (fst->is_stack()) { - int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset; - if (offset == stack_slot_offset) at_old_sp = true; - stream->print("[%s+0x%x]", spname, offset); - } else { - stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd); - } - stream->print(" "); - stream->move_to(tab2); - stream->print("= "); - if (at_this) { - m->method_holder()->print_value_on(stream); - } else { - bool did_name = false; - if (!at_this && ss.is_object()) { - Symbol* name = ss.as_symbol_or_null(); - if (name != NULL) { - name->print_value_on(stream); - did_name = true; - } - } - if (!did_name) - stream->print("%s", type2name(t)); - } - if (at_old_sp) { - stream->print(" (%s of caller)", spname); - did_old_sp = true; - } - stream->cr(); - sig_index += type2size[t]; - arg_index += 1; - if (!at_this) ss.next(); - } - if (!did_old_sp) { - stream->print(" # "); - stream->move_to(tab1); - stream->print("[%s+0x%x]", spname, stack_slot_offset); - stream->print(" (%s of caller)", spname); - stream->cr(); - } - } - } -} - -void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) { - // First, find an oopmap in (begin, end]. - // We use the odd half-closed interval so that oop maps and scope descs - // which are tied to the byte after a call are printed with the call itself. - address base = code_begin(); - OopMapSet* oms = oop_maps(); - if (oms != NULL) { - for (int i = 0, imax = oms->size(); i < imax; i++) { - OopMap* om = oms->at(i); - address pc = base + om->offset(); - if (pc > begin) { - if (pc <= end) { - st->move_to(column); - st->print("; "); - om->print_on(st); - } - break; - } - } - } - - // Print any debug info present at this pc. 
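The signature walk above pads every two-slot type (long, double) with a trailing T_VOID so that sig_bt and regs line up slot-for-slot with the calling convention. A small sketch of that convention applied to a raw method descriptor (simplified: primitives and L...; object types only, no arrays, and the descriptor is assumed well-formed):

#include <string>
#include <vector>

std::vector<char> expand_slots(const std::string& desc) {
  std::vector<char> slots;
  size_t i = desc.find('(') + 1;
  while (desc[i] != ')') {
    char c = desc[i];
    if (c == 'L') i = desc.find(';', i);     // "Ljava/lang/String;" is one slot
    slots.push_back(c);
    if (c == 'J' || c == 'D')                // two-slot types get a filler,
      slots.push_back('V');                  // mirroring the T_VOID above
    i++;
  }
  return slots;  // "(IJLjava/lang/String;D)V" yields I J V L D V
}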
- ScopeDesc* sd = scope_desc_in(begin, end);
- if (sd != NULL) {
- st->move_to(column);
- if (sd->bci() == SynchronizationEntryBCI) {
- st->print(";*synchronization entry");
- } else {
- if (sd->method() == NULL) {
- st->print("method is NULL");
- } else if (sd->method()->is_native()) {
- st->print("method is native");
- } else {
- Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
- st->print(";*%s", Bytecodes::name(bc));
- switch (bc) {
- case Bytecodes::_invokevirtual:
- case Bytecodes::_invokespecial:
- case Bytecodes::_invokestatic:
- case Bytecodes::_invokeinterface:
- {
- Bytecode_invoke invoke(sd->method(), sd->bci());
- st->print(" ");
- if (invoke.name() != NULL)
- invoke.name()->print_symbol_on(st);
- else
- st->print("<UNKNOWN>");
- break;
- }
- case Bytecodes::_getfield:
- case Bytecodes::_putfield:
- case Bytecodes::_getstatic:
- case Bytecodes::_putstatic:
- {
- Bytecode_field field(sd->method(), sd->bci());
- st->print(" ");
- if (field.name() != NULL)
- field.name()->print_symbol_on(st);
- else
- st->print("<UNKNOWN>");
- }
- }
- }
- }
-
- // Print all scopes
- for (;sd != NULL; sd = sd->sender()) {
- st->move_to(column);
- st->print("; -");
- if (sd->method() == NULL) {
- st->print("method is NULL");
- } else {
- sd->method()->print_short_name(st);
- }
- int lineno = sd->method()->line_number_from_bci(sd->bci());
- if (lineno != -1) {
- st->print("@%d (line %d)", sd->bci(), lineno);
- } else {
- st->print("@%d", sd->bci());
- }
- st->cr();
- }
- }
-
- // Print relocation information
- const char* str = reloc_string_for(begin, end);
- if (str != NULL) {
- if (sd != NULL) st->cr();
- st->move_to(column);
- st->print("; {%s}", str);
- }
- int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
- if (cont_offset != 0) {
- st->move_to(column);
- st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
- }
-
-}
-
-#ifndef PRODUCT
-
-void nmethod::print_value_on(outputStream* st) const {
- st->print("nmethod");
- print_on(st, NULL);
-}
-
-void nmethod::print_calls(outputStream* st) {
- RelocIterator iter(this);
- while (iter.next()) {
- switch (iter.type()) {
- case relocInfo::virtual_call_type:
- case relocInfo::opt_virtual_call_type: {
- VerifyMutexLocker mc(CompiledIC_lock);
- CompiledIC_at(iter.reloc())->print();
- break;
- }
- case relocInfo::static_call_type:
- st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
- compiledStaticCall_at(iter.reloc())->print();
- break;
- }
- }
-}
-
-void nmethod::print_handler_table() {
- ExceptionHandlerTable(this).print();
-}
-
-void nmethod::print_nul_chk_table() {
- ImplicitExceptionTable(this).print(code_begin());
-}
-
-#endif // PRODUCT
-
-void nmethod::print_statistics() {
- ttyLocker ttyl;
- if (xtty != NULL) xtty->head("statistics type='nmethod'");
- nmethod_stats.print_native_nmethod_stats();
- nmethod_stats.print_nmethod_stats();
- DebugInformationRecorder::print_statistics();
- nmethod_stats.print_pc_stats();
- Dependencies::print_statistics();
- if (xtty != NULL) xtty->tail("statistics");
-}
+}
+
+
+nmethod::nmethod(
+ Method* method,
+ int nmethod_size,
+ int compile_id,
+ int entry_bci,
+ CodeOffsets* offsets,
+ int orig_pc_offset,
+ DebugInformationRecorder* debug_info,
+ Dependencies* dependencies,
+ CodeBuffer *code_buffer,
+ int frame_size,
+ OopMapSet* oop_maps,
+ ExceptionHandlerTable* handler_table,
+ ImplicitExceptionTable* nul_chk_table,
+ AbstractCompiler* compiler,
+ int comp_level,
+ GrowableArray<jlong>* leaf_graph_ids
+#ifdef GRAAL
+ , Handle installed_code,
+ Handle triggered_deoptimizations +#endif + ) + : CodeBlob("nmethod", code_buffer, sizeof(nmethod), + nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps), + _native_receiver_sp_offset(in_ByteSize(-1)), + _native_basic_lock_sp_offset(in_ByteSize(-1)) +{ + assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); + { + debug_only(No_Safepoint_Verifier nsv;) + assert_locked_or_safepoint(CodeCache_lock); + + init_defaults(); + _method = method; + _entry_bci = entry_bci; + _compile_id = compile_id; + _comp_level = comp_level; + _compiler = compiler; + _orig_pc_offset = orig_pc_offset; + + // Section offsets + _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()); + _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()); + +#ifdef GRAAL + _graal_installed_code = installed_code(); + _triggered_deoptimizations = (typeArrayOop)triggered_deoptimizations(); +#endif + if (compiler->is_graal()) { + // Graal might not produce any stub sections + if (offsets->value(CodeOffsets::Exceptions) != -1) { + _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions); + } else { + _exception_offset = -1; + } + if (offsets->value(CodeOffsets::Deopt) != -1) { + _deoptimize_offset = code_offset() + offsets->value(CodeOffsets::Deopt); + } else { + _deoptimize_offset = -1; + } + if (offsets->value(CodeOffsets::DeoptMH) != -1) { + _deoptimize_mh_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH); + } else { + _deoptimize_mh_offset = -1; + } + } else { + // Exception handler and deopt handler are in the stub section + assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set"); + assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set"); + + _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions); + _deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt); + if (offsets->value(CodeOffsets::DeoptMH) != -1) { + _deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH); + } else { + _deoptimize_mh_offset = -1; + } + } + if (offsets->value(CodeOffsets::UnwindHandler) != -1) { + _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler); + } else { + _unwind_handler_offset = -1; + } + + int leaf_graph_ids_size = leaf_graph_ids == NULL ? 
0 : round_to(sizeof(jlong) * leaf_graph_ids->length(), oopSize); + + _oops_offset = data_offset(); + _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize); + _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize); + + _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize); + _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size()); + _handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize); + _nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize); + _leaf_graph_ids_offset = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize); + _nmethod_end_offset = _leaf_graph_ids_offset + leaf_graph_ids_size; + + _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); + _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); + _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); + _exception_cache = NULL; + _pc_desc_cache.reset_to(scopes_pcs_begin()); + + // Copy contents of ScopeDescRecorder to nmethod + code_buffer->copy_values_to(this); + debug_info->copy_to(this); + dependencies->copy_to(this); + if (ScavengeRootsInCode && detect_scavenge_root_oops()) { + CodeCache::add_scavenge_root_nmethod(this); + } + debug_only(verify_scavenge_root_oops()); + + CodeCache::commit(this); + + // Copy contents of ExceptionHandlerTable to nmethod + handler_table->copy_to(this); + nul_chk_table->copy_to(this); + + if (leaf_graph_ids != NULL && leaf_graph_ids_size > 0) { + memcpy(leaf_graph_ids_begin(), leaf_graph_ids->adr_at(0), leaf_graph_ids_size); + } + + // we use the information of entry points to find out if a method is + // static or non static + assert(compiler->is_c2() || + _method->is_static() == (entry_point() == _verified_entry_point), + " entry points must be same for static methods and vice versa"); + } + + bool printnmethods = PrintNMethods + || CompilerOracle::should_print(_method) + || CompilerOracle::has_option_string(_method, "PrintNMethods"); + if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) { + print_nmethod(printnmethods); + } +} + + +// Print a short set of xml attributes to identify this nmethod. The +// output should be embedded in some other element. 
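The offset chain above lays the data sections out back to back: each section begins where the previous one ends, rounded up to word size. A compact model of that arithmetic (round_to here is a local power-of-two helper, not the HotSpot one, and the section names are just the ones visible above):

#include <stddef.h>

static size_t round_to(size_t x, size_t align) {  // align must be a power of two
  return (x + align - 1) & ~(align - 1);
}

struct Layout { size_t oops, metadata, scopes_data, end; };

static Layout plan(size_t data_offset, size_t oop_bytes, size_t metadata_bytes,
                   size_t debug_bytes, size_t word) {
  Layout l;
  l.oops        = data_offset;
  l.metadata    = l.oops        + round_to(oop_bytes,      word);
  l.scopes_data = l.metadata    + round_to(metadata_bytes, word);
  l.end         = l.scopes_data + round_to(debug_bytes,    word);
  return l;
}

Each section's begin/end accessor is then just the blob base plus a recorded offset, which is why the constructor stores offsets rather than pointers.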
+void nmethod::log_identity(xmlStream* log) const {
+ log->print(" compile_id='%d'", compile_id());
+ const char* nm_kind = compile_kind();
+ if (nm_kind != NULL) log->print(" compile_kind='%s'", nm_kind);
+ if (compiler() != NULL) {
+ log->print(" compiler='%s'", compiler()->name());
+ }
+ if (TieredCompilation) {
+ log->print(" level='%d'", comp_level());
+ }
+}
+
+
+#define LOG_OFFSET(log, name) \
+ if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
+ log->print(" " XSTR(name) "_offset='%d'" , \
+ (intptr_t)name##_begin() - (intptr_t)this)
+
+
+void nmethod::log_new_nmethod() const {
+ if (LogCompilation && xtty != NULL) {
+ ttyLocker ttyl;
+ HandleMark hm;
+ xtty->begin_elem("nmethod");
+ log_identity(xtty);
+ xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
+ xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);
+
+ LOG_OFFSET(xtty, relocation);
+ LOG_OFFSET(xtty, consts);
+ LOG_OFFSET(xtty, insts);
+ LOG_OFFSET(xtty, stub);
+ LOG_OFFSET(xtty, scopes_data);
+ LOG_OFFSET(xtty, scopes_pcs);
+ LOG_OFFSET(xtty, dependencies);
+ LOG_OFFSET(xtty, handler_table);
+ LOG_OFFSET(xtty, nul_chk_table);
+ LOG_OFFSET(xtty, oops);
+
+ xtty->method(method());
+ xtty->stamp();
+ xtty->end_elem();
+ }
+}
+
+#undef LOG_OFFSET
+
+
+// Print out more verbose output usually for a newly created nmethod.
+void nmethod::print_on(outputStream* st, const char* msg) const {
+ if (st != NULL) {
+ ttyLocker ttyl;
+ if (WizardMode) {
+ CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
+ st->print_cr(" (" INTPTR_FORMAT ")", this);
+ } else {
+ CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
+ }
+ }
+}
+
+
+void nmethod::print_nmethod(bool printmethod) {
+ ttyLocker ttyl; // keep the following output all in one block
+ if (xtty != NULL) {
+ xtty->begin_head("print_nmethod");
+ xtty->stamp();
+ xtty->end_head();
+ }
+ // print the header part first
+ print();
+ // then print the requested information
+ if (printmethod) {
+ print_code();
+ print_pcs();
+ if (oop_maps()) {
+ oop_maps()->print();
+ }
+ }
+ if (PrintDebugInfo) {
+ print_scopes();
+ }
+ if (PrintRelocations) {
+ print_relocations();
+ }
+ if (PrintDependencies) {
+ print_dependencies();
+ }
+ if (PrintExceptionHandlers) {
+ print_handler_table();
+ print_nul_chk_table();
+ }
+ if (xtty != NULL) {
+ xtty->tail("print_nmethod");
+ }
+}
+
+
+// Promote one word from an assembly-time handle to a live embedded oop.
+inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
+ if (handle == NULL ||
+ // As a special case, IC oops are initialized to 1 or -1.
+ handle == (jobject) Universe::non_oop_word()) {
+ (*dest) = (oop) handle;
+ } else {
+ (*dest) = JNIHandles::resolve_non_null(handle);
+ }
+}
+
+
+// Have to have the same name because it's called by a template
+void nmethod::copy_values(GrowableArray<jobject>* array) {
+ int length = array->length();
+ assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
+ oop* dest = oops_begin();
+ for (int index = 0 ; index < length; index++) {
+ initialize_immediate_oop(&dest[index], array->at(index));
+ }
+
+ // Now we can fix up all the oops in the code. We need to do this
+ // in the code because the assembler uses jobjects as placeholders.
+ // The code and relocations have already been initialized by the
+ // CodeBlob constructor, so it is valid even at this early point to
+ // iterate over relocations and patch the code.
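LOG_OFFSET above leans on token pasting to build the name##_begin()/name##_end() accessor pair and only emits an attribute for non-empty sections. The same idiom in a self-contained form (printf stands in for xmlStream, plain variables stand in for the accessors, and #name replaces HotSpot's XSTR):

#include <stdio.h>

#define LOG_OFFSET(base, name)                          \
  if (name##_end - name##_begin)                        \
    printf(" " #name "_offset='%ld'",                   \
           (long)(name##_begin - base))

int main() {
  long base = 1000, stub_begin = 1040, stub_end = 1072;
  LOG_OFFSET(base, stub);          // prints: stub_offset='40'
  printf("\n");
  return 0;
}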
+ fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
+}
+
+void nmethod::copy_values(GrowableArray<Metadata*>* array) {
+ int length = array->length();
+ assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
+ Metadata** dest = metadata_begin();
+ for (int index = 0 ; index < length; index++) {
+ dest[index] = array->at(index);
+ }
+}
+
+bool nmethod::is_at_poll_return(address pc) {
+ RelocIterator iter(this, pc, pc+1);
+ while (iter.next()) {
+ if (iter.type() == relocInfo::poll_return_type)
+ return true;
+ }
+ return false;
+}
+
+
+bool nmethod::is_at_poll_or_poll_return(address pc) {
+ RelocIterator iter(this, pc, pc+1);
+ while (iter.next()) {
+ relocInfo::relocType t = iter.type();
+ if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
+ return true;
+ }
+ return false;
+}
+
+
+void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
+ // re-patch all oop-bearing instructions, just in case some oops moved
+ RelocIterator iter(this, begin, end);
+ while (iter.next()) {
+ if (iter.type() == relocInfo::oop_type) {
+ oop_Relocation* reloc = iter.oop_reloc();
+ if (initialize_immediates && reloc->oop_is_immediate()) {
+ oop* dest = reloc->oop_addr();
+ initialize_immediate_oop(dest, (jobject) *dest);
+ }
+ // Refresh the oop-related bits of this instruction.
+ reloc->fix_oop_relocation();
+ } else if (iter.type() == relocInfo::metadata_type) {
+ metadata_Relocation* reloc = iter.metadata_reloc();
+ reloc->fix_metadata_relocation();
+ }
+
+ // There must not be any interfering patches or breakpoints.
+ assert(!(iter.type() == relocInfo::breakpoint_type
+ && iter.breakpoint_reloc()->active()),
+ "no active breakpoint");
+ }
+}
+
+
+void nmethod::verify_oop_relocations() {
+ // Ensure that the code matches the current oop values
+ RelocIterator iter(this, NULL, NULL);
+ while (iter.next()) {
+ if (iter.type() == relocInfo::oop_type) {
+ oop_Relocation* reloc = iter.oop_reloc();
+ if (!reloc->oop_is_immediate()) {
+ reloc->verify_oop_relocation();
+ }
+ }
+ }
+}
+
+
+ScopeDesc* nmethod::scope_desc_at(address pc) {
+ PcDesc* pd = pc_desc_at(pc);
+ guarantee(pd != NULL, "scope must be present");
+ return new ScopeDesc(this, pd->scope_decode_offset(),
+ pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
+ pd->return_oop());
+}
+
+
+void nmethod::clear_inline_caches() {
+ assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
+ if (is_zombie()) {
+ return;
+ }
+
+ RelocIterator iter(this);
+ while (iter.next()) {
+ iter.reloc()->clear_inline_cache();
+ }
+}
+
+
+void nmethod::cleanup_inline_caches() {
+
+ assert_locked_or_safepoint(CompiledIC_lock);
+
+ // If the method is not entrant or zombie then a JMP is plastered over the
+ // first few bytes. If an oop in the old code was there, that oop
+ // should not get GC'd. Skip the first few bytes of oops on
+ // not-entrant methods.
+ address low_boundary = verified_entry_point();
+ if (!is_in_use()) {
+ low_boundary += NativeJump::instruction_size;
+ // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
+ // This means that the low_boundary is going to be a little too high.
+ // This shouldn't matter, since oops of non-entrant methods are never used.
+ // In fact, why are we bothering to look at oops in a non-entrant method??
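initialize_immediate_oop and fix_oop_relocations above form a two-phase scheme: the assembler leaves opaque jobject handles in oop slots, and installation resolves each handle to a live pointer while letting NULL and the non-oop sentinel pass through. A minimal model of that resolution step (resolve() is a placeholder standing in for JNIHandles::resolve_non_null, not the real API):

#include <vector>

typedef void* handle_t;
static void* const non_oop_word = (void*)-1;     // sentinel, as in Universe::non_oop_word()

static void* resolve(handle_t h) { return h; }   // placeholder resolver

static void initialize_slots(std::vector<void*>& slots,
                             const std::vector<handle_t>& handles) {
  for (size_t i = 0; i < handles.size(); i++) {
    handle_t h = handles[i];
    // NULL and the sentinel are stored as-is; everything else resolves.
    slots[i] = (h == NULL || h == non_oop_word) ? h : resolve(h);
  }
}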
+ } + + // Find all calls in an nmethod, and clear the ones that points to zombie methods + ResourceMark rm; + RelocIterator iter(this, low_boundary); + while(iter.next()) { + switch(iter.type()) { + case relocInfo::virtual_call_type: + case relocInfo::opt_virtual_call_type: { + CompiledIC *ic = CompiledIC_at(iter.reloc()); + // Ok, to lookup references to zombies here + CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination()); + if( cb != NULL && cb->is_nmethod() ) { + nmethod* nm = (nmethod*)cb; + // Clean inline caches pointing to both zombie and not_entrant methods + if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(); + } + break; + } + case relocInfo::static_call_type: { + CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc()); + CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination()); + if( cb != NULL && cb->is_nmethod() ) { + nmethod* nm = (nmethod*)cb; + // Clean inline caches pointing to both zombie and not_entrant methods + if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean(); + } + break; + } + } + } +} + +// This is a private interface with the sweeper. +void nmethod::mark_as_seen_on_stack() { + assert(is_not_entrant(), "must be a non-entrant method"); + // Set the traversal mark to ensure that the sweeper does 2 + // cleaning passes before moving to zombie. + set_stack_traversal_mark(NMethodSweeper::traversal_count()); +} + +// Tell if a non-entrant method can be converted to a zombie (i.e., +// there are no activations on the stack, not in use by the VM, +// and not in use by the ServiceThread) +bool nmethod::can_not_entrant_be_converted() { + assert(is_not_entrant(), "must be a non-entrant method"); + + // Since the nmethod sweeper only does partial sweep the sweeper's traversal + // count can be greater than the stack traversal count before it hits the + // nmethod for the second time. + return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() && + !is_locked_by_vm(); +} + +void nmethod::inc_decompile_count() { + if (!is_compiled_by_c2() && !is_compiled_by_graal()) return; + // Could be gated by ProfileTraps, but do not bother... + Method* m = method(); + if (m == NULL) return; + MethodData* mdo = m->method_data(); + if (mdo == NULL) return; + // There is a benign race here. See comments in methodData.hpp. + mdo->inc_decompile_count(); +} + +void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) { + + post_compiled_method_unload(); + + // Since this nmethod is being unloaded, make sure that dependencies + // recorded in instanceKlasses get flushed and pass non-NULL closure to + // indicate that this work is being done during a GC. + assert(Universe::heap()->is_gc_active(), "should only be called during gc"); + assert(is_alive != NULL, "Should be non-NULL"); + // A non-NULL is_alive closure indicates that this is being called during GC. + flush_dependencies(is_alive); + + // Break cycle between nmethod & method + if (TraceClassUnloading && WizardMode) { + tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT + " unloadable], Method*(" INTPTR_FORMAT + "), cause(" INTPTR_FORMAT ")", + this, (address)_method, (address)cause); + if (!Universe::heap()->is_gc_active()) + cause->klass()->print(); + } + // Unlink the osr method, so we do not look this up again + if (is_osr_method()) { + invalidate_osr_method(); + } + // If _method is already NULL the Method* is about to be unloaded, + // so we don't have to break the cycle. 
Note that it is possible to + // have the Method* live here, in case we unload the nmethod because + // it is pointing to some oop (other than the Method*) being unloaded. + if (_method != NULL) { + // OSR methods point to the Method*, but the Method* does not + // point back! + if (_method->code() == this) { + _method->clear_code(); // Break a cycle + } + _method = NULL; // Clear the method of this dead nmethod + } + +#ifdef GRAAL + // The method can only be unloaded after the pointer to the installed code + // Java wrapper is no longer alive. Here we need to clear out this weak + // reference to the dead object. + if (_graal_installed_code != NULL) { + _graal_installed_code = NULL; + } +#endif + + // Make the class unloaded - i.e., change state and notify sweeper + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + if (is_in_use()) { + // Transitioning directly from live to unloaded -- so + // we need to force a cache clean-up; remember this + // for later on. + CodeCache::set_needs_cache_clean(true); + } + _state = unloaded; + + // Log the unloading. + log_state_change(); + + // The Method* is gone at this point + assert(_method == NULL, "Tautology"); + + set_osr_link(NULL); + //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods + NMethodSweeper::notify(this); +} + +void nmethod::invalidate_osr_method() { + assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); + // Remove from list of active nmethods + if (method() != NULL) + method()->method_holder()->remove_osr_nmethod(this); + // Set entry as invalid + _entry_bci = InvalidOSREntryBci; +} + +void nmethod::log_state_change() const { + if (LogCompilation) { + if (xtty != NULL) { + ttyLocker ttyl; // keep the following output all in one block + if (_state == unloaded) { + xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'", + os::current_thread_id()); + } else { + xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s", + os::current_thread_id(), + (_state == zombie ? " zombie='1'" : "")); + } + log_identity(xtty); + xtty->stamp(); + xtty->end_elem(); + } + } + if (PrintCompilation && _state != unloaded) { + print_on(tty, _state == zombie ? "made zombie" : "made not entrant"); + } +} + +// Common functionality for both make_not_entrant and make_zombie +bool nmethod::make_not_entrant_or_zombie(unsigned int state) { + assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); + assert(!is_zombie(), "should not already be a zombie"); + + // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below. + nmethodLocker nml(this); + methodHandle the_method(method()); + No_Safepoint_Verifier nsv; + + { + // invalidate osr nmethod before acquiring the patching lock since + // they both acquire leaf locks and we don't want a deadlock. + // This logic is equivalent to the logic below for patching the + // verified entry point of regular methods. + if (is_osr_method()) { + // this effectively makes the osr nmethod not entrant + invalidate_osr_method(); + } + + // Enter critical section. Does not block for safepoint. + MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); + + if (_state == state) { + // another thread already performed this transition so nothing + // to do, but return false to indicate this. + return false; + } + + // The caller can be calling the method statically or through an inline + // cache call. 
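make_unloaded above is one arc of a one-way lifecycle: an nmethod moves from in_use through not_entrant to zombie, or jumps straight to unloaded during GC, and never moves back. A toy encoding of that invariant (the numeric values are assumptions that mirror the ordering the sweeper relies on, not the real constants):

enum State { in_use = 0, not_entrant = 1, zombie = 2, unloaded = 3 };

// Transitions only move forward: not_entrant -> zombie is legal,
// in_use -> unloaded is the GC shortcut seen above, but a zombie
// never becomes alive again.
static bool transition_ok(State from, State to) { return to > from; }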
+ if (!is_osr_method() && !is_not_entrant()) {
+ address stub = SharedRuntime::get_handle_wrong_method_stub();
+#ifdef GRAAL
+ if (_graal_installed_code != NULL && !HotSpotNmethod::isDefault(_graal_installed_code)) {
+ // This was manually installed machine code. Patch entry with stub that throws an exception.
+ stub = SharedRuntime::get_deoptimized_installed_code_stub();
+ }
+#endif
+ NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), stub);
+ }
+
+ if (is_in_use()) {
+ // It's a true state change, so mark the method as decompiled.
+ // Do it only for transition from alive.
+ inc_decompile_count();
+ }
+
+ // Change state
+ _state = state;
+
+ // Log the transition once
+ log_state_change();
+
+ // Remove nmethod from method.
+ // We need to check if both the _code and _from_compiled_code_entry_point
+ // refer to this nmethod because there is a race in setting these two fields
+ // in Method* as seen in bugid 4947125.
+ // If the vep() points to the zombie nmethod, the memory for the nmethod
+ // could be flushed and the compiler and vtable stubs could still call
+ // through it.
+ if (method() != NULL && (method()->code() == this ||
+ method()->from_compiled_entry() == verified_entry_point())) {
+ HandleMark hm;
+ method()->clear_code();
+ }
+
+ if (state == not_entrant) {
+ mark_as_seen_on_stack();
+ }
+
+ } // leave critical region under Patching_lock
+
+ // When the nmethod becomes zombie it is no longer alive so the
+ // dependencies must be flushed. nmethods in the not_entrant
+ // state will be flushed later when the transition to zombie
+ // happens or they get unloaded.
+ if (state == zombie) {
+ {
+ // Flushing dependencies must be done before any possible
+ // safepoint can sneak in, otherwise the oops used by the
+ // dependency logic could have become stale.
+ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ flush_dependencies(NULL);
+ }
+
+ // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
+ // event and it hasn't already been reported for this nmethod then
+ // report it now. (The event may have been reported earlier if the GC
+ // marked it for unloading.) JvmtiDeferredEventQueue support means
+ // we no longer go to a safepoint here.
+ post_compiled_method_unload();
+
+#ifdef ASSERT
+ // It's no longer safe to access the oops section since zombie
+ // nmethods aren't scanned for GC.
+ _oops_are_stale = true;
+#endif
+ } else {
+ assert(state == not_entrant, "other cases may need to be handled differently");
+ }
+
+ if (TraceCreateZombies) {
+ ResourceMark m;
+ tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", this, this->method()->name_and_sig_as_C_string(), (state == not_entrant) ? "not entrant" : "zombie");
+ }
+
+ // Make sweeper aware that there is a zombie method that needs to be removed
+ NMethodSweeper::notify(this);
+
+ return true;
+}
+
+void nmethod::flush() {
+ // Note that there are no valid oops in the nmethod anymore.
+ assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
+ assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
+
+ assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
+ assert_locked_or_safepoint(CodeCache_lock);
+
+ // completely deallocate this method
+ Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
+ if (PrintMethodFlushing) {
+ tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". 
Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb", + _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024); + } + + // We need to deallocate any ExceptionCache data. + // Note that we do not need to grab the nmethod lock for this, it + // better be thread safe if we're disposing of it! + ExceptionCache* ec = exception_cache(); + set_exception_cache(NULL); + while(ec != NULL) { + ExceptionCache* next = ec->next(); + delete ec; + ec = next; + } + + if (on_scavenge_root_list()) { + CodeCache::drop_scavenge_root_nmethod(this); + } + + if (is_speculatively_disconnected()) { + CodeCache::remove_saved_code(this); + } + +#ifdef SHARK + ((SharkCompiler *) compiler())->free_compiled_method(insts_begin()); +#endif // SHARK + + ((CodeBlob*)(this))->flush(); + + CodeCache::free(this); +} + + +// +// Notify all classes this nmethod is dependent on that it is no +// longer dependent. This should only be called in two situations. +// First, when a nmethod transitions to a zombie all dependents need +// to be clear. Since zombification happens at a safepoint there's no +// synchronization issues. The second place is a little more tricky. +// During phase 1 of mark sweep class unloading may happen and as a +// result some nmethods may get unloaded. In this case the flushing +// of dependencies must happen during phase 1 since after GC any +// dependencies in the unloaded nmethod won't be updated, so +// traversing the dependency information in unsafe. In that case this +// function is called with a non-NULL argument and this function only +// notifies instanceKlasses that are reachable + +void nmethod::flush_dependencies(BoolObjectClosure* is_alive) { + assert_locked_or_safepoint(CodeCache_lock); + assert(Universe::heap()->is_gc_active() == (is_alive != NULL), + "is_alive is non-NULL if and only if we are called during GC"); + if (!has_flushed_dependencies()) { + set_has_flushed_dependencies(); + for (Dependencies::DepStream deps(this); deps.next(); ) { + Klass* klass = deps.context_type(); + if (klass == NULL) continue; // ignore things like evol_method + + // During GC the is_alive closure is non-NULL, and is used to + // determine liveness of dependees that need to be updated. + if (is_alive == NULL || klass->is_loader_alive(is_alive)) { + InstanceKlass::cast(klass)->remove_dependent_nmethod(this); + } + } + } +} + + +// If this oop is not live, the nmethod can be unloaded. +bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) { + assert(root != NULL, "just checking"); + oop obj = *root; + if (obj == NULL || is_alive->do_object_b(obj)) { + return false; + } + + // If ScavengeRootsInCode is true, an nmethod might be unloaded + // simply because one of its constant oops has gone dead. + // No actual classes need to be unloaded in order for this to occur. 
+ assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading"); + make_unloaded(is_alive, obj); + return true; +} + +// ------------------------------------------------------------------ +// post_compiled_method_load_event +// new method for install_code() path +// Transfer information from compilation to jvmti +void nmethod::post_compiled_method_load_event() { + + Method* moop = method(); +#ifndef USDT2 + HS_DTRACE_PROBE8(hotspot, compiled__method__load, + moop->klass_name()->bytes(), + moop->klass_name()->utf8_length(), + moop->name()->bytes(), + moop->name()->utf8_length(), + moop->signature()->bytes(), + moop->signature()->utf8_length(), + insts_begin(), insts_size()); +#else /* USDT2 */ + HOTSPOT_COMPILED_METHOD_LOAD( + (char *) moop->klass_name()->bytes(), + moop->klass_name()->utf8_length(), + (char *) moop->name()->bytes(), + moop->name()->utf8_length(), + (char *) moop->signature()->bytes(), + moop->signature()->utf8_length(), + insts_begin(), insts_size()); +#endif /* USDT2 */ + + if (JvmtiExport::should_post_compiled_method_load() || + JvmtiExport::should_post_compiled_method_unload()) { + get_and_cache_jmethod_id(); + } + + if (JvmtiExport::should_post_compiled_method_load()) { + // Let the Service thread (which is a real Java thread) post the event + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); + JvmtiDeferredEventQueue::enqueue( + JvmtiDeferredEvent::compiled_method_load_event(this)); + } +} + +jmethodID nmethod::get_and_cache_jmethod_id() { + if (_jmethod_id == NULL) { + // Cache the jmethod_id since it can no longer be looked up once the + // method itself has been marked for unloading. + _jmethod_id = method()->jmethod_id(); + } + return _jmethod_id; +} + +void nmethod::post_compiled_method_unload() { + if (unload_reported()) { + // During unloading we transition to unloaded and then to zombie + // and the unloading is reported during the first transition. + return; + } + + assert(_method != NULL && !is_unloaded(), "just checking"); + DTRACE_METHOD_UNLOAD_PROBE(method()); + + // If a JVMTI agent has enabled the CompiledMethodUnload event then + // post the event. Sometime later this nmethod will be made a zombie + // by the sweeper but the Method* will not be valid at that point. + // If the _jmethod_id is null then no load event was ever requested + // so don't bother posting the unload. The main reason for this is + // that the jmethodID is a weak reference to the Method* so if + // it's being unloaded there's no way to look it up since the weak + // ref will have been cleared. + if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { + assert(!unload_reported(), "already unloaded"); + JvmtiDeferredEvent event = + JvmtiDeferredEvent::compiled_method_unload_event(this, + _jmethod_id, insts_begin()); + if (SafepointSynchronize::is_at_safepoint()) { + // Don't want to take the queueing lock. Add it as pending and + // it will get enqueued later. + JvmtiDeferredEventQueue::add_pending_event(event); + } else { + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); + JvmtiDeferredEventQueue::enqueue(event); + } + } + + // The JVMTI CompiledMethodUnload event can be enabled or disabled at + // any time. As the nmethod is being unloaded now we mark it has + // having the unload event reported - this will ensure that we don't + // attempt to report the event in the unlikely scenario where the + // event is enabled at the time the nmethod is made a zombie. 
+ set_unload_reported(); +} + +// This is called at the end of the strong tracing/marking phase of a +// GC to unload an nmethod if it contains otherwise unreachable +// oops. + +void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) { + // Make sure the oop's ready to receive visitors + assert(!is_zombie() && !is_unloaded(), + "should not call follow on zombie or unloaded nmethod"); + + // If the method is not entrant then a JMP is plastered over the + // first few bytes. If an oop in the old code was there, that oop + // should not get GC'd. Skip the first few bytes of oops on + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (is_not_entrant()) { + low_boundary += NativeJump::instruction_size; + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // (See comment above.) + } + + // The RedefineClasses() API can cause the class unloading invariant + // to no longer be true. See jvmtiExport.hpp for details. + // Also, leave a debugging breadcrumb in local flag. + bool a_class_was_redefined = JvmtiExport::has_redefined_a_class(); + if (a_class_was_redefined) { + // This set of the unloading_occurred flag is done before the + // call to post_compiled_method_unload() so that the unloading + // of this nmethod is reported. + unloading_occurred = true; + } + +#ifdef GRAAL + // Follow Graal method + if (_graal_installed_code != NULL) { + if (HotSpotNmethod::isDefault(_graal_installed_code)) { + if (!is_alive->do_object_b(_graal_installed_code)) { + _graal_installed_code = NULL; + } + } else { + if (can_unload(is_alive, (oop*)&_graal_installed_code, unloading_occurred)) { + return; + } + } + } +#endif + + // Exception cache + ExceptionCache* ec = exception_cache(); + while (ec != NULL) { + Klass* ex_klass = ec->exception_type(); + ExceptionCache* next_ec = ec->next(); + if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) { + remove_from_exception_cache(ec); + } + ec = next_ec; + } + + // If class unloading occurred we first iterate over all inline caches and + // clear ICs where the cached oop is referring to an unloaded klass or method. + // The remaining live cached oops will be traversed in the relocInfo::oop_type + // iteration below. + if (unloading_occurred) { + RelocIterator iter(this, low_boundary); + while(iter.next()) { + if (iter.type() == relocInfo::virtual_call_type) { + CompiledIC *ic = CompiledIC_at(iter.reloc()); + if (ic->is_icholder_call()) { + // The only exception is compiledICHolder oops which may + // yet be marked below. (We check this further below). + CompiledICHolder* cichk_oop = ic->cached_icholder(); + if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) && + cichk_oop->holder_klass()->is_loader_alive(is_alive)) { + continue; + } + } else { + Metadata* ic_oop = ic->cached_metadata(); + if (ic_oop != NULL) { + if (ic_oop->is_klass()) { + if (((Klass*)ic_oop)->is_loader_alive(is_alive)) { + continue; + } + } else if (ic_oop->is_method()) { + if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) { + continue; + } + } else { + ShouldNotReachHere(); + } + } + } + ic->set_to_clean(); + } + } + } + + // Compiled code + { + RelocIterator iter(this, low_boundary); + while (iter.next()) { + if (iter.type() == relocInfo::oop_type) { + oop_Relocation* r = iter.oop_reloc(); + // In this loop, we must only traverse those oops directly embedded in + // the code. Other oops (oop_index>0) are seen as part of scopes_oops. 
+ assert(1 == (r->oop_is_immediate()) + + (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), + "oop must be found in exactly one place"); + if (r->oop_is_immediate() && r->oop_value() != NULL) { + if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) { + return; + } + } + } + } + } + + + // Scopes + for (oop* p = oops_begin(); p < oops_end(); p++) { + if (*p == Universe::non_oop_word()) continue; // skip non-oops + if (can_unload(is_alive, p, unloading_occurred)) { + return; + } + } + + // Ensure that all metadata is still alive + verify_metadata_loaders(low_boundary, is_alive); +} + +#ifdef ASSERT + +class CheckClass : AllStatic { + static BoolObjectClosure* _is_alive; + + // Check class_loader is alive for this bit of metadata. + static void check_class(Metadata* md) { + Klass* klass = NULL; + if (md->is_klass()) { + klass = ((Klass*)md); + } else if (md->is_method()) { + klass = ((Method*)md)->method_holder(); + } else if (md->is_methodData()) { + klass = ((MethodData*)md)->method()->method_holder(); + } else { + md->print(); + ShouldNotReachHere(); + } + assert(klass->is_loader_alive(_is_alive), "must be alive"); + } + public: + static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) { + assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint"); + _is_alive = is_alive; + nm->metadata_do(check_class); + } +}; + +// This is called during a safepoint so can use static data +BoolObjectClosure* CheckClass::_is_alive = NULL; +#endif // ASSERT + + +// Processing of oop references should have been sufficient to keep +// all strong references alive. Any weak references should have been +// cleared as well. Visit all the metadata and ensure that it's +// really alive. +void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) { +#ifdef ASSERT + RelocIterator iter(this, low_boundary); + while (iter.next()) { + // static_stub_Relocations may have dangling references to + // Method*s so trim them out here. Otherwise it looks like + // compiled code is maintaining a link to dead metadata. + address static_call_addr = NULL; + if (iter.type() == relocInfo::opt_virtual_call_type) { + CompiledIC* cic = CompiledIC_at(iter.reloc()); + if (!cic->is_call_to_interpreted()) { + static_call_addr = iter.addr(); + } + } else if (iter.type() == relocInfo::static_call_type) { + CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc()); + if (!csc->is_call_to_interpreted()) { + static_call_addr = iter.addr(); + } + } + if (static_call_addr != NULL) { + RelocIterator sciter(this, low_boundary); + while (sciter.next()) { + if (sciter.type() == relocInfo::static_stub_type && + sciter.static_stub_reloc()->static_call() == static_call_addr) { + sciter.static_stub_reloc()->clear_inline_cache(); + } + } + } + } + // Check that the metadata embedded in the nmethod is alive + CheckClass::do_check_class(is_alive, this); +#endif +} + + +// Iterate over metadata calling this function. Used by RedefineClasses +void nmethod::metadata_do(void f(Metadata*)) { + address low_boundary = verified_entry_point(); + if (is_not_entrant()) { + low_boundary += NativeJump::instruction_size; + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // (See comment above.) + } + { + // Visit all immediate references that are embedded in the instruction stream. 
+ RelocIterator iter(this, low_boundary);
+ while (iter.next()) {
+ if (iter.type() == relocInfo::metadata_type ) {
+ metadata_Relocation* r = iter.metadata_reloc();
+ // In this loop, we must only follow those metadatas directly embedded in
+ // the code. Other metadatas (oop_index>0) are seen as part of
+ // the metadata section below.
+ assert(1 == (r->metadata_is_immediate()) +
+ (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
+ "metadata must be found in exactly one place");
+ if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
+ Metadata* md = r->metadata_value();
+ f(md);
+ }
+ }
+ }
+ }
+
+ // Visit the metadata section
+ for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
+ if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops
+ Metadata* md = *p;
+ f(md);
+ }
+ // Finally, call f on the Method* itself, which is not embedded in the places above.
+ if (_method != NULL) f(_method);
+}
+
+
+// This method is called twice during GC -- once while
+// tracing the "active" nmethods on thread stacks during
+// the (strong) marking phase, and then again when walking
+// the code cache contents during the weak roots processing
+// phase. The two uses are distinguished by means of the
+// 'do_strong_roots_only' flag, which is true in the first
+// case. We want to walk the weak roots in the nmethod
+// only in the second case. The weak roots in the nmethod
+// are the oops in the ExceptionCache and the InlineCache
+// oops.
+void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
+ // make sure the oops are ready to receive visitors
+ assert(!is_zombie() && !is_unloaded(),
+ "should not call follow on zombie or unloaded nmethod");
+
+ // If the method is not entrant or zombie then a JMP is plastered over the
+ // first few bytes. If an oop in the old code was there, that oop
+ // should not get GC'd. Skip the first few bytes of oops on
+ // not-entrant methods.
+ address low_boundary = verified_entry_point();
+ if (is_not_entrant()) {
+ low_boundary += NativeJump::instruction_size;
+ // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
+ // (See comment above.)
+ }
+
+#ifdef GRAAL
+ if (_graal_installed_code != NULL) {
+ f->do_oop((oop*) &_graal_installed_code);
+ }
+ if (_triggered_deoptimizations != NULL) {
+ f->do_oop((oop*) &_triggered_deoptimizations);
+ }
+#endif
+
+ RelocIterator iter(this, low_boundary);
+
+ while (iter.next()) {
+ if (iter.type() == relocInfo::oop_type ) {
+ oop_Relocation* r = iter.oop_reloc();
+ // In this loop, we must only follow those oops directly embedded in
+ // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
+ assert(1 == (r->oop_is_immediate()) +
+ (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+ "oop must be found in exactly one place");
+ if (r->oop_is_immediate() && r->oop_value() != NULL) {
+ f->do_oop(r->oop_addr());
+ }
+ }
+ }
+
+ // Scopes
+ // This includes oop constants not inlined in the code stream.
+ for (oop* p = oops_begin(); p < oops_end(); p++) {
+ if (*p == Universe::non_oop_word()) continue; // skip non-oops
+ f->do_oop(p);
+ }
+}
+
+#define NMETHOD_SENTINEL ((nmethod*)badAddress)
+
+nmethod* volatile nmethod::_oops_do_mark_nmethods;
+
+// An nmethod is "marked" if its _mark_link is set non-null.
+// Even if it is the end of the linked list, it will have a non-null link value,
+// as long as it is on the list.
+// This code must be MP safe, because it is used from parallel GC passes. 
+bool nmethod::test_set_oops_do_mark() { + assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called"); + nmethod* observed_mark_link = _oops_do_mark_link; + if (observed_mark_link == NULL) { + // Claim this nmethod for this thread to mark. + observed_mark_link = (nmethod*) + Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL); + if (observed_mark_link == NULL) { + + // Atomically append this nmethod (now claimed) to the head of the list: + nmethod* observed_mark_nmethods = _oops_do_mark_nmethods; + for (;;) { + nmethod* required_mark_nmethods = observed_mark_nmethods; + _oops_do_mark_link = required_mark_nmethods; + observed_mark_nmethods = (nmethod*) + Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods); + if (observed_mark_nmethods == required_mark_nmethods) + break; + } + // Mark was clear when we first saw this guy. + NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark")); + return false; + } + } + // On fall through, another racing thread marked this nmethod before we did. + return true; +} + +void nmethod::oops_do_marking_prologue() { + NOT_PRODUCT(if (TraceScavenge) tty->print_cr("[oops_do_marking_prologue")); + assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row"); + // We use cmpxchg_ptr instead of regular assignment here because the user + // may fork a bunch of threads, and we need them all to see the same state. + void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL); + guarantee(observed == NULL, "no races in this sequential code"); +} + +void nmethod::oops_do_marking_epilogue() { + assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row"); + nmethod* cur = _oops_do_mark_nmethods; + while (cur != NMETHOD_SENTINEL) { + assert(cur != NULL, "not NULL-terminated"); + nmethod* next = cur->_oops_do_mark_link; + cur->_oops_do_mark_link = NULL; + cur->fix_oop_relocations(); + NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark")); + cur = next; + } + void* required = _oops_do_mark_nmethods; + void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required); + guarantee(observed == required, "no races in this sequential code"); + NOT_PRODUCT(if (TraceScavenge) tty->print_cr("oops_do_marking_epilogue]")); +} + +class DetectScavengeRoot: public OopClosure { + bool _detected_scavenge_root; +public: + DetectScavengeRoot() : _detected_scavenge_root(false) + { NOT_PRODUCT(_print_nm = NULL); } + bool detected_scavenge_root() { return _detected_scavenge_root; } + virtual void do_oop(oop* p) { + if ((*p) != NULL && (*p)->is_scavengable()) { + NOT_PRODUCT(maybe_print(p)); + _detected_scavenge_root = true; + } + } + virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } + +#ifndef PRODUCT + nmethod* _print_nm; + void maybe_print(oop* p) { + if (_print_nm == NULL) return; + if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root"); + tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")", + _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm), + (intptr_t)(*p), (intptr_t)p); + (*p)->print(); + } +#endif //PRODUCT +}; + +bool nmethod::detect_scavenge_root_oops() { + DetectScavengeRoot detect_scavenge_root; + NOT_PRODUCT(if (TraceScavenge) detect_scavenge_root._print_nm = this); + oops_do(&detect_scavenge_root); + return detect_scavenge_root.detected_scavenge_root(); +} + +// Method that knows how to preserve outgoing 
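test_set_oops_do_mark above overloads one field: _oops_do_mark_link serves as both the claim mark and the list link, so claiming (null to sentinel) and pushing (sentinel to observed head) are two separate CAS steps. The same protocol in standalone form, with std::atomic replacing Atomic::cmpxchg_ptr and illustrative names throughout:

#include <atomic>

struct Node {
  std::atomic<Node*> link;
  Node() : link((Node*)0) {}
};

static Node* const SENTINEL = (Node*)0x1;        // analogue of NMETHOD_SENTINEL
static std::atomic<Node*> g_list((Node*)0);      // analogue of _oops_do_mark_nmethods

// Returns true if another thread had already claimed n, as above.
static bool test_set_mark(Node* n) {
  Node* expected = 0;
  if (!n->link.compare_exchange_strong(expected, SENTINEL))
    return true;                                 // lost the claim race
  Node* head = g_list.load();
  do {
    n->link.store(head);                         // link to the observed head
  } while (!g_list.compare_exchange_weak(head, n));
  return false;                                  // claimed and pushed
}

Because the link is never null while a node is on the list, the epilogue can walk the chain and clear each link without any extra marking, exactly as oops_do_marking_epilogue does.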
arguments at call. This method must be +// called with a frame corresponding to a Java invoke +void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { +#ifndef SHARK + if (!method()->is_native()) { + SimpleScopeDesc ssd(this, fr.pc()); + Bytecode_invoke call(ssd.method(), ssd.bci()); + // compiled invokedynamic call sites have an implicit receiver at + // resolution time, so make sure it gets GC'ed. + bool has_receiver = !call.is_invokestatic(); + Symbol* signature = call.signature(); + fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f); + } +#endif // !SHARK +} + + +oop nmethod::embeddedOop_at(u_char* p) { + RelocIterator iter(this, p, p + 1); + while (iter.next()) + if (iter.type() == relocInfo::oop_type) { + return iter.oop_reloc()->oop_value(); + } + return NULL; +} + + +inline bool includes(void* p, void* from, void* to) { + return from <= p && p < to; +} + + +void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) { + assert(count >= 2, "must be sentinel values, at least"); + +#ifdef ASSERT + // must be sorted and unique; we do a binary search in find_pc_desc() + int prev_offset = pcs[0].pc_offset(); + assert(prev_offset == PcDesc::lower_offset_limit, + "must start with a sentinel"); + for (int i = 1; i < count; i++) { + int this_offset = pcs[i].pc_offset(); + assert(this_offset > prev_offset, "offsets must be sorted"); + prev_offset = this_offset; + } + assert(prev_offset == PcDesc::upper_offset_limit, + "must end with a sentinel"); +#endif //ASSERT + + // Search for MethodHandle invokes and tag the nmethod. + for (int i = 0; i < count; i++) { + if (pcs[i].is_method_handle_invoke()) { + set_has_method_handle_invokes(true); + break; + } + } + assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler"); + + int size = count * sizeof(PcDesc); + assert(scopes_pcs_size() >= size, "oob"); + memcpy(scopes_pcs_begin(), pcs, size); + + // Adjust the final sentinel downward. + PcDesc* last_pc = &scopes_pcs_begin()[count-1]; + assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity"); + last_pc->set_pc_offset(content_size() + 1); + for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) { + // Fill any rounding gaps with copies of the last record. + last_pc[1] = last_pc[0]; + } + // The following assert could fail if sizeof(PcDesc) is not + // an integral multiple of oopSize (the rounding term). + // If it fails, change the logic to always allocate a multiple + // of sizeof(PcDesc), and fill unused words with copies of *last_pc. 
+ assert(last_pc + 1 == scopes_pcs_end(), "must match exactly"); +} + +void nmethod::copy_scopes_data(u_char* buffer, int size) { + assert(scopes_data_size() >= size, "oob"); + memcpy(scopes_data_begin(), buffer, size); +} + + +#ifdef ASSERT +static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) { + PcDesc* lower = nm->scopes_pcs_begin(); + PcDesc* upper = nm->scopes_pcs_end(); + lower += 1; // exclude initial sentinel + PcDesc* res = NULL; + for (PcDesc* p = lower; p < upper; p++) { + NOT_PRODUCT(--nmethod_stats.pc_desc_tests); // don't count this call to match_desc + if (match_desc(p, pc_offset, approximate)) { + if (res == NULL) + res = p; + else + res = (PcDesc*) badAddress; + } + } + return res; +} +#endif + + +// Finds a PcDesc with real-pc equal to "pc" +PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) { + address base_address = code_begin(); + if ((pc < base_address) || + (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) { + return NULL; // PC is wildly out of range + } + int pc_offset = (int) (pc - base_address); + + // Check whether the PcDesc cache contains the desired PcDesc + // (This has an almost 100% hit rate.) + PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate); + if (res != NULL) { + assert(res == linear_search(this, pc_offset, approximate), "cache ok"); + return res; + } + + // Fallback algorithm: quasi-linear search for the PcDesc + // Find the last pc_offset less than the given offset. + // The successor must be the required match, if there is a match at all. + // (Use a fixed radix to avoid expensive affine pointer arithmetic.) + PcDesc* lower = scopes_pcs_begin(); + PcDesc* upper = scopes_pcs_end(); + upper -= 1; // exclude final sentinel + if (lower >= upper) return NULL; // native method; no PcDescs at all + +#define assert_LU_OK \ + /* invariant on lower..upper during the following search: */ \ + assert(lower->pc_offset() < pc_offset, "sanity"); \ + assert(upper->pc_offset() >= pc_offset, "sanity") + assert_LU_OK; + + // Use the last successful return as a split point. + PcDesc* mid = _pc_desc_cache.last_pc_desc(); + NOT_PRODUCT(++nmethod_stats.pc_desc_searches); + if (mid->pc_offset() < pc_offset) { + lower = mid; + } else { + upper = mid; + } + + // Take giant steps at first (4096, then 256, then 16, then 1) + const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1); + const int RADIX = (1 << LOG2_RADIX); + for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) { + while ((mid = lower + step) < upper) { + assert_LU_OK; + NOT_PRODUCT(++nmethod_stats.pc_desc_searches); + if (mid->pc_offset() < pc_offset) { + lower = mid; + } else { + upper = mid; + break; + } + } + assert_LU_OK; + } + + // Sneak up on the value with a linear search of length ~16.
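// [Editor's note] A worked example of the schedule above, assuming the
// product-build value LOG2_RADIX == 4: the outer loop starts with
// step == 1 << 12 == 4096, then shrinks it to 256 and then to 16, with each
// pass narrowing [lower, upper]; the final linear loop below therefore scans
// at most RADIX == 16 entries.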
+ while (true) { + assert_LU_OK; + mid = lower + 1; + NOT_PRODUCT(++nmethod_stats.pc_desc_searches); + if (mid->pc_offset() < pc_offset) { + lower = mid; + } else { + upper = mid; + break; + } + } +#undef assert_LU_OK + + if (match_desc(upper, pc_offset, approximate)) { + assert(upper == linear_search(this, pc_offset, approximate), "search ok"); + _pc_desc_cache.add_pc_desc(upper); + return upper; + } else { + assert(NULL == linear_search(this, pc_offset, approximate), "search ok"); + return NULL; + } +} + + +bool nmethod::check_all_dependencies() { + bool found_check = false; + // wholesale check of all dependencies + for (Dependencies::DepStream deps(this); deps.next(); ) { + if (deps.check_dependency() != NULL) { + found_check = true; + NOT_DEBUG(break); + } + } + return found_check; // tell caller if we found anything +} + +bool nmethod::check_dependency_on(DepChange& changes) { + // What has happened: + // 1) a new class dependee has been added + // 2) dependee and all its super classes have been marked + bool found_check = false; // set true if we are upset + for (Dependencies::DepStream deps(this); deps.next(); ) { + // Evaluate only relevant dependencies. + if (deps.spot_check_dependency_at(changes) != NULL) { + found_check = true; + NOT_DEBUG(break); + } + } + return found_check; +} + +bool nmethod::is_evol_dependent_on(Klass* dependee) { + InstanceKlass *dependee_ik = InstanceKlass::cast(dependee); + Array<Method*>* dependee_methods = dependee_ik->methods(); + for (Dependencies::DepStream deps(this); deps.next(); ) { + if (deps.type() == Dependencies::evol_method) { + Method* method = deps.method_argument(0); + for (int j = 0; j < dependee_methods->length(); j++) { + if (dependee_methods->at(j) == method) { + // RC_TRACE macro has an embedded ResourceMark + RC_TRACE(0x01000000, + ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)", + _method->method_holder()->external_name(), + _method->name()->as_C_string(), + _method->signature()->as_C_string(), compile_id(), + method->method_holder()->external_name(), + method->name()->as_C_string(), + method->signature()->as_C_string())); + if (TraceDependencies || LogCompilation) + deps.log_dependency(dependee); + return true; + } + } + } + } + return false; +} + +// Called from mark_for_deoptimization, when dependee is invalidated.
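[Editor's note] The dependency walks above and below share one iteration idiom. As a minimal sketch (count_evol_dependencies is a hypothetical helper, not part of this patch), the same Dependencies::DepStream loop can be used to count dependencies of one type:

    // Sketch only: tally evol_method dependencies the way the walks above do.
    static int count_evol_dependencies(nmethod* nm) {
      int count = 0;
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        if (deps.type() == Dependencies::evol_method) {
          count++; // a dependency that class redefinition can invalidate
        }
      }
      return count;
    }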
+bool nmethod::is_dependent_on_method(Method* dependee) { + for (Dependencies::DepStream deps(this); deps.next(); ) { + if (deps.type() != Dependencies::evol_method) + continue; + Method* method = deps.method_argument(0); + if (method == dependee) return true; + } + return false; +} + + +bool nmethod::is_patchable_at(address instr_addr) { + assert(insts_contains(instr_addr), "wrong nmethod used"); + if (is_zombie()) { + // a zombie may never be patched + return false; + } + return true; +} + + +address nmethod::continuation_for_implicit_exception(address pc) { + // Exception happened outside inline-cache check code => we are inside + // an active nmethod => use cpc to determine a return address + int exception_offset = pc - code_begin(); + int cont_offset = ImplicitExceptionTable(this).at( exception_offset ); +#ifdef ASSERT + if (cont_offset == 0) { + Thread* thread = ThreadLocalStorage::get_thread_slow(); + ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY + HandleMark hm(thread); + ResourceMark rm(thread); + CodeBlob* cb = CodeCache::find_blob(pc); + assert(cb != NULL && cb == this, ""); + tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc); + print(); + method()->print_codes(); + print_code(); + print_pcs(); + } +#endif + if (cont_offset == 0) { + // Let the normal error handling report the exception + return NULL; + } + return code_begin() + cont_offset; +} + + + +void nmethod_init() { + // make sure you didn't forget to adjust the filler fields + assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word"); +} + + +//------------------------------------------------------------------------------------------- + + +// QQQ might we make this work from a frame?? +nmethodLocker::nmethodLocker(address pc) { + CodeBlob* cb = CodeCache::find_blob(pc); + guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found"); + _nm = (nmethod*)cb; + lock_nmethod(_nm); +} + +// Only JvmtiDeferredEvent::compiled_method_unload_event() +// should pass zombie_ok == true. 
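[Editor's note] A usage sketch may help before the primitives below: nmethodLocker is a scoped guard, so callers normally rely on construction and destruction rather than calling lock_nmethod/unlock_nmethod directly. Hypothetical call site, assuming the destructor performs the unlock (as the "unmatched nmethod lock/unlock" guarantee below implies):

    {
      nmethodLocker nml(pc); // finds the nmethod containing pc and bumps _lock_count
      // ... inspect or patch the nmethod; it cannot be flushed while locked ...
    }                        // guard leaves scope; _lock_count is decremented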
+void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) { + if (nm == NULL) return; + Atomic::inc(&nm->_lock_count); + guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method"); +} + +void nmethodLocker::unlock_nmethod(nmethod* nm) { + if (nm == NULL) return; + Atomic::dec(&nm->_lock_count); + guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock"); +} + + +// ----------------------------------------------------------------------------- +// nmethod::get_deopt_original_pc +// +// Return the original PC for the given PC if: +// (a) the given PC belongs to a nmethod and +// (b) it is a deopt PC +address nmethod::get_deopt_original_pc(const frame* fr) { + if (fr->cb() == NULL) return NULL; + + nmethod* nm = fr->cb()->as_nmethod_or_null(); + if (nm != NULL && nm->is_deopt_pc(fr->pc())) + return nm->get_original_pc(fr); + + return NULL; +} + + +// ----------------------------------------------------------------------------- +// MethodHandle + +bool nmethod::is_method_handle_return(address return_pc) { + if (!has_method_handle_invokes()) return false; + PcDesc* pd = pc_desc_at(return_pc); + if (pd == NULL) + return false; + return pd->is_method_handle_invoke(); +} + + +// ----------------------------------------------------------------------------- +// Verification + +class VerifyOopsClosure: public OopClosure { + nmethod* _nm; + bool _ok; +public: + VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { } + bool ok() { return _ok; } + virtual void do_oop(oop* p) { + if ((*p) == NULL || (*p)->is_oop()) return; + if (_ok) { + _nm->print_nmethod(true); + _ok = false; + } + tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)", + (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); + } + virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } +}; + +void nmethod::verify() { + + // Hmm: it seems odd that OSR methods can be deopted without being marked + // as zombie or not_entrant. + + if( is_zombie() || is_not_entrant() ) + return; + + // Make sure all the entry points are correctly aligned for patching. + NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point()); + + // assert(method()->is_oop(), "must be valid"); + + ResourceMark rm; + + if (!CodeCache::contains(this)) { + fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this)); + } + + if(is_native_method() ) + return; + + nmethod* nm = CodeCache::find_nmethod(verified_entry_point()); + if (nm != this) { + fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", + this)); + } + + for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { + if (! p->verify(this)) { + tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this); + } + } + + VerifyOopsClosure voc(this); + oops_do(&voc); + assert(voc.ok(), "embedded oops must be OK"); + verify_scavenge_root_oops(); + + verify_scopes(); +} + + +void nmethod::verify_interrupt_point(address call_site) { + // This code does not work in release mode since + // owns_lock only is available in debug mode.
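// [Editor's note] The branch below distinguishes two kinds of callers: a
// thread that already owns CompiledIC_lock, or a VM/concurrent-GC thread
// paused at a safepoint, may create the CompiledIC directly; any other
// thread must first take the lock. Both paths are meant to satisfy the
// lock-or-safepoint assertion inside CompiledIC_at().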
+ CompiledIC* ic = NULL; + Thread *cur = Thread::current(); + if (CompiledIC_lock->owner() == cur || + ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) && + SafepointSynchronize::is_at_safepoint())) { + ic = CompiledIC_at(this, call_site); + CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); + } else { + MutexLocker ml_verify (CompiledIC_lock); + ic = CompiledIC_at(this, call_site); + } + + PcDesc* pd = pc_desc_at(ic->end_of_call()); + assert(pd != NULL, "PcDesc must exist"); + for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), + pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(), + pd->return_oop()); + !sd->is_top(); sd = sd->sender()) { + sd->verify(); + } +} + +void nmethod::verify_scopes() { + if( !method() ) return; // Runtime stubs have no scope + if (method()->is_native()) return; // Ignore stub methods. + // Iterate through all interrupt points + // and verify that the debug information is valid. + RelocIterator iter((nmethod*)this); + while (iter.next()) { + address stub = NULL; + switch (iter.type()) { + case relocInfo::virtual_call_type: + verify_interrupt_point(iter.addr()); + break; + case relocInfo::opt_virtual_call_type: + stub = iter.opt_virtual_call_reloc()->static_stub(); + verify_interrupt_point(iter.addr()); + break; + case relocInfo::static_call_type: + stub = iter.static_call_reloc()->static_stub(); + //verify_interrupt_point(iter.addr()); + break; + case relocInfo::runtime_call_type: + address destination = iter.reloc()->value(); + // Right now there is no way to find out which entries support + // an interrupt point. It would be nice if we had this + // information in a table. + break; + } + assert(stub == NULL || stub_contains(stub), "static call stub outside stub section"); + } +} + + +// ----------------------------------------------------------------------------- +// Non-product code +#ifndef PRODUCT + +class DebugScavengeRoot: public OopClosure { + nmethod* _nm; + bool _ok; +public: + DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { } + bool ok() { return _ok; } + virtual void do_oop(oop* p) { + if ((*p) == NULL || !(*p)->is_scavengable()) return; + if (_ok) { + _nm->print_nmethod(true); + _ok = false; + } + tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)", + (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); + (*p)->print(); + } + virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } +}; + +void nmethod::verify_scavenge_root_oops() { + if (!on_scavenge_root_list()) { + // Actually look inside, to verify the claim that it's clean.
+ DebugScavengeRoot debug_scavenge_root(this); + oops_do(&debug_scavenge_root); + if (!debug_scavenge_root.ok()) + fatal("found an unadvertised bad scavengable oop in the code cache"); + } + assert(scavenge_root_not_marked(), ""); +} + +#endif // PRODUCT + +// Printing operations + +void nmethod::print() const { + ResourceMark rm; + ttyLocker ttyl; // keep the following output all in one block + + tty->print("Compiled method "); + + if (is_compiled_by_c1()) { + tty->print("(c1) "); + } else if (is_compiled_by_c2()) { + tty->print("(c2) "); + } else if (is_compiled_by_shark()) { + tty->print("(shark) "); + } else if (is_compiled_by_graal()) { + tty->print("(Graal) "); + } else { + tty->print("(nm) "); + } + + print_on(tty, NULL); + + if (WizardMode) { + tty->print("((nmethod*) "INTPTR_FORMAT ") ", this); + tty->print(" for method " INTPTR_FORMAT , (address)method()); + tty->print(" { "); + if (is_in_use()) tty->print("in_use "); + if (is_not_entrant()) tty->print("not_entrant "); + if (is_zombie()) tty->print("zombie "); + if (is_unloaded()) tty->print("unloaded "); + if (on_scavenge_root_list()) tty->print("scavenge_root "); + tty->print_cr("}:"); + } + if (size () > 0) tty->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + (address)this, + (address)this + size(), + size()); + if (relocation_size () > 0) tty->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + relocation_begin(), + relocation_end(), + relocation_size()); + if (consts_size () > 0) tty->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + consts_begin(), + consts_end(), + consts_size()); + if (insts_size () > 0) tty->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + insts_begin(), + insts_end(), + insts_size()); + if (stub_size () > 0) tty->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + stub_begin(), + stub_end(), + stub_size()); + if (oops_size () > 0) tty->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + oops_begin(), + oops_end(), + oops_size()); + if (metadata_size () > 0) tty->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + metadata_begin(), + metadata_end(), + metadata_size()); + if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + scopes_data_begin(), + scopes_data_end(), + scopes_data_size()); + if (scopes_pcs_size () > 0) tty->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + scopes_pcs_begin(), + scopes_pcs_end(), + scopes_pcs_size()); + if (dependencies_size () > 0) tty->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + dependencies_begin(), + dependencies_end(), + dependencies_size()); + if (handler_table_size() > 0) tty->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + handler_table_begin(), + handler_table_end(), + handler_table_size()); + if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", + nul_chk_table_begin(), + nul_chk_table_end(), + nul_chk_table_size()); +} + +void nmethod::print_code() { + HandleMark hm; + ResourceMark m; + Disassembler::decode(this); +} + + +#ifndef PRODUCT + +void nmethod::print_scopes() { + // Find the first pc desc for all scopes in the code and print it. 
+ ResourceMark rm; + for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { + if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null) + continue; + + ScopeDesc* sd = scope_desc_at(p->real_pc(this)); + sd->print_on(tty, p); + } +} + +void nmethod::print_dependencies() { + ResourceMark rm; + ttyLocker ttyl; // keep the following output all in one block + tty->print_cr("Dependencies:"); + for (Dependencies::DepStream deps(this); deps.next(); ) { + deps.print_dependency(); + Klass* ctxk = deps.context_type(); + if (ctxk != NULL) { + if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) { + tty->print_cr(" [nmethod<=klass]%s", ctxk->external_name()); + } + } + deps.log_dependency(); // put it into the xml log also + } +} + + +void nmethod::print_relocations() { + ResourceMark m; // in case methods get printed via the debugger + tty->print_cr("relocations:"); + RelocIterator iter(this); + iter.print(); + if (UseRelocIndex) { + jint* index_end = (jint*)relocation_end() - 1; + jint index_size = *index_end; + jint* index_start = (jint*)( (address)index_end - index_size ); + tty->print_cr(" index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size); + if (index_size > 0) { + jint* ip; + for (ip = index_start; ip+2 <= index_end; ip += 2) + tty->print_cr(" (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT, + ip[0], + ip[1], + header_end()+ip[0], + relocation_begin()-1+ip[1]); + for (; ip < index_end; ip++) + tty->print_cr(" (%d ?)", ip[0]); + tty->print_cr(" @" INTPTR_FORMAT ": index_size=%d", ip, *ip++); + tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip); + } + } +} + + +void nmethod::print_pcs() { + ResourceMark m; // in case methods get printed via debugger + tty->print_cr("pc-bytecode offsets:"); + for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { + p->print(this); + } +} + +#endif // PRODUCT + +const char* nmethod::reloc_string_for(u_char* begin, u_char* end) { + RelocIterator iter(this, begin, end); + bool have_one = false; + while (iter.next()) { + have_one = true; + switch (iter.type()) { + case relocInfo::none: return "no_reloc"; + case relocInfo::oop_type: { + stringStream st; + oop_Relocation* r = iter.oop_reloc(); + oop obj = r->oop_value(); + st.print("oop("); + if (obj == NULL) st.print("NULL"); + else obj->print_value_on(&st); + st.print(")"); + return st.as_string(); + } + case relocInfo::metadata_type: { + stringStream st; + metadata_Relocation* r = iter.metadata_reloc(); + Metadata* obj = r->metadata_value(); + st.print("metadata("); + if (obj == NULL) st.print("NULL"); + else obj->print_value_on(&st); + st.print(")"); + return st.as_string(); + } + case relocInfo::virtual_call_type: return "virtual_call"; + case relocInfo::opt_virtual_call_type: return "optimized virtual_call"; + case relocInfo::static_call_type: return "static_call"; + case relocInfo::static_stub_type: return "static_stub"; + case relocInfo::runtime_call_type: return "runtime_call"; + case relocInfo::external_word_type: return "external_word"; + case relocInfo::internal_word_type: return "internal_word"; + case relocInfo::section_word_type: return "section_word"; + case relocInfo::poll_type: return "poll"; + case relocInfo::poll_return_type: return "poll_return"; + case relocInfo::type_mask: return "type_bit_mask"; + } + } + return have_one ? 
"other" : NULL; +} + +// Return a the last scope in (begin..end] +ScopeDesc* nmethod::scope_desc_in(address begin, address end) { + PcDesc* p = pc_desc_near(begin+1); + if (p != NULL && p->real_pc(this) <= end) { + return new ScopeDesc(this, p->scope_decode_offset(), + p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(), + p->return_oop()); + } + return NULL; +} + +void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const { + if (block_begin == entry_point()) stream->print_cr("[Entry Point]"); + if (block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]"); + if (GRAAL_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin()) stream->print_cr("[Exception Handler]"); + if (block_begin == stub_begin()) stream->print_cr("[Stub Code]"); + if (GRAAL_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin()) stream->print_cr("[Deopt Handler Code]"); + + if (has_method_handle_invokes()) + if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]"); + + if (block_begin == consts_begin()) stream->print_cr("[Constants]"); + + if (block_begin == entry_point()) { + methodHandle m = method(); + if (m.not_null()) { + stream->print(" # "); + m->print_value_on(stream); + stream->cr(); + } + if (m.not_null() && !is_osr_method()) { + ResourceMark rm; + int sizeargs = m->size_of_parameters(); + BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs); + VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs); + { + int sig_index = 0; + if (!m->is_static()) + sig_bt[sig_index++] = T_OBJECT; // 'this' + for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) { + BasicType t = ss.type(); + sig_bt[sig_index++] = t; + if (type2size[t] == 2) { + sig_bt[sig_index++] = T_VOID; + } else { + assert(type2size[t] == 1, "size is 1 or 2"); + } + } + assert(sig_index == sizeargs, ""); + } + const char* spname = "sp"; // make arch-specific? + intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false); + int stack_slot_offset = this->frame_size() * wordSize; + int tab1 = 14, tab2 = 24; + int sig_index = 0; + int arg_index = (m->is_static() ? 0 : -1); + bool did_old_sp = false; + for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) { + bool at_this = (arg_index == -1); + bool at_old_sp = false; + BasicType t = (at_this ? 
T_OBJECT : ss.type()); + assert(t == sig_bt[sig_index], "sigs in sync"); + if (at_this) + stream->print(" # this: "); + else + stream->print(" # parm%d: ", arg_index); + stream->move_to(tab1); + VMReg fst = regs[sig_index].first(); + VMReg snd = regs[sig_index].second(); + if (fst->is_reg()) { + stream->print("%s", fst->name()); + if (snd->is_valid()) { + stream->print(":%s", snd->name()); + } + } else if (fst->is_stack()) { + int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset; + if (offset == stack_slot_offset) at_old_sp = true; + stream->print("[%s+0x%x]", spname, offset); + } else { + stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd); + } + stream->print(" "); + stream->move_to(tab2); + stream->print("= "); + if (at_this) { + m->method_holder()->print_value_on(stream); + } else { + bool did_name = false; + if (!at_this && ss.is_object()) { + Symbol* name = ss.as_symbol_or_null(); + if (name != NULL) { + name->print_value_on(stream); + did_name = true; + } + } + if (!did_name) + stream->print("%s", type2name(t)); + } + if (at_old_sp) { + stream->print(" (%s of caller)", spname); + did_old_sp = true; + } + stream->cr(); + sig_index += type2size[t]; + arg_index += 1; + if (!at_this) ss.next(); + } + if (!did_old_sp) { + stream->print(" # "); + stream->move_to(tab1); + stream->print("[%s+0x%x]", spname, stack_slot_offset); + stream->print(" (%s of caller)", spname); + stream->cr(); + } + } + } +} + +void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) { + // First, find an oopmap in (begin, end]. + // We use the odd half-closed interval so that oop maps and scope descs + // which are tied to the byte after a call are printed with the call itself. + address base = code_begin(); + OopMapSet* oms = oop_maps(); + if (oms != NULL) { + for (int i = 0, imax = oms->size(); i < imax; i++) { + OopMap* om = oms->at(i); + address pc = base + om->offset(); + if (pc > begin) { + if (pc <= end) { + st->move_to(column); + st->print("; "); + om->print_on(st); + } + break; + } + } + } + + // Print any debug info present at this pc. 
+ ScopeDesc* sd = scope_desc_in(begin, end); + if (sd != NULL) { + st->move_to(column); + if (sd->bci() == SynchronizationEntryBCI) { + st->print(";*synchronization entry"); + } else { + if (sd->method() == NULL) { + st->print("method is NULL"); + } else if (sd->method()->is_native()) { + st->print("method is native"); + } else { + Bytecodes::Code bc = sd->method()->java_code_at(sd->bci()); + st->print(";*%s", Bytecodes::name(bc)); + switch (bc) { + case Bytecodes::_invokevirtual: + case Bytecodes::_invokespecial: + case Bytecodes::_invokestatic: + case Bytecodes::_invokeinterface: + { + Bytecode_invoke invoke(sd->method(), sd->bci()); + st->print(" "); + if (invoke.name() != NULL) + invoke.name()->print_symbol_on(st); + else + st->print("<UNKNOWN>"); + break; + } + case Bytecodes::_getfield: + case Bytecodes::_putfield: + case Bytecodes::_getstatic: + case Bytecodes::_putstatic: + { + Bytecode_field field(sd->method(), sd->bci()); + st->print(" "); + if (field.name() != NULL) + field.name()->print_symbol_on(st); + else + st->print("<UNKNOWN>"); + } + } + } + } + + // Print all scopes + for (;sd != NULL; sd = sd->sender()) { + st->move_to(column); + st->print("; -"); + if (sd->method() == NULL) { + st->print("method is NULL"); + } else { + sd->method()->print_short_name(st); + } + int lineno = sd->method()->line_number_from_bci(sd->bci()); + if (lineno != -1) { + st->print("@%d (line %d)", sd->bci(), lineno); + } else { + st->print("@%d", sd->bci()); + } + st->cr(); + } + } + + // Print relocation information + const char* str = reloc_string_for(begin, end); + if (str != NULL) { + if (sd != NULL) st->cr(); + st->move_to(column); + st->print("; {%s}", str); + } + int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin()); + if (cont_offset != 0) { + st->move_to(column); + st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset); + } + +} + +#ifndef PRODUCT + +void nmethod::print_value_on(outputStream* st) const { + st->print("nmethod"); + print_on(st, NULL); +} + +void nmethod::print_calls(outputStream* st) { + RelocIterator iter(this); + while (iter.next()) { + switch (iter.type()) { + case relocInfo::virtual_call_type: + case relocInfo::opt_virtual_call_type: { + VerifyMutexLocker mc(CompiledIC_lock); + CompiledIC_at(iter.reloc())->print(); + break; + } + case relocInfo::static_call_type: + st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr()); + compiledStaticCall_at(iter.reloc())->print(); + break; + } + } +} + +void nmethod::print_handler_table() { + ExceptionHandlerTable(this).print(); +} + +void nmethod::print_nul_chk_table() { + ImplicitExceptionTable(this).print(code_begin()); +} + +#endif // PRODUCT + +void nmethod::print_statistics() { + ttyLocker ttyl; + if (xtty != NULL) xtty->head("statistics type='nmethod'"); + nmethod_stats.print_native_nmethod_stats(); + nmethod_stats.print_nmethod_stats(); + DebugInformationRecorder::print_statistics(); + nmethod_stats.print_pc_stats(); + Dependencies::print_statistics(); + if (xtty != NULL) xtty->tail("statistics"); +} diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/graal/graalCodeInstaller.cpp --- a/src/share/vm/graal/graalCodeInstaller.cpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/graal/graalCodeInstaller.cpp Tue May 14 10:18:31 2013 +0200 @@ -303,10 +303,10 @@ return new MonitorValue(owner_value, lock_data_loc, eliminated); } -void CodeInstaller::initialize_assumptions(oop target_method) { +void CodeInstaller::initialize_assumptions(oop compiled_code) { _oop_recorder = new
OopRecorder(&_arena); _dependencies = new Dependencies(&_arena, _oop_recorder); - Handle assumptions_handle = CompilationResult::assumptions(HotSpotCompilationResult::comp(target_method)); + Handle assumptions_handle = CompilationResult::assumptions(HotSpotCompiledCode::comp(compiled_code)); if (!assumptions_handle.is_null()) { objArrayHandle assumptions(Thread::current(), (objArrayOop)Assumptions::list(assumptions_handle())); int length = assumptions->length(); @@ -332,8 +332,8 @@ } } -GrowableArray<jlong>* get_leaf_graph_ids(Handle& comp_result) { - arrayOop leafGraphArray = (arrayOop) CompilationResult::leafGraphIds(HotSpotCompilationResult::comp(comp_result)); +GrowableArray<jlong>* get_leaf_graph_ids(Handle& compiled_code) { + arrayOop leafGraphArray = (arrayOop) CompilationResult::leafGraphIds(HotSpotCompiledCode::comp(compiled_code)); jint length; if (leafGraphArray == NULL) { @@ -351,25 +351,25 @@ } // constructor used to create a method -CodeInstaller::CodeInstaller(Handle& comp_result, GraalEnv::CodeInstallResult& result, CodeBlob*& cb, Handle installed_code, Handle triggered_deoptimizations) { +CodeInstaller::CodeInstaller(Handle& compiled_code, GraalEnv::CodeInstallResult& result, CodeBlob*& cb, Handle installed_code, Handle triggered_deoptimizations) { GraalCompiler::initialize_buffer_blob(); CodeBuffer buffer(JavaThread::current()->get_buffer_blob()); - jobject comp_result_obj = JNIHandles::make_local(comp_result()); - jint entry_bci = HotSpotCompilationResult::entryBCI(comp_result); - initialize_assumptions(JNIHandles::resolve(comp_result_obj)); + jobject compiled_code_obj = JNIHandles::make_local(compiled_code()); + initialize_assumptions(JNIHandles::resolve(compiled_code_obj)); { No_Safepoint_Verifier no_safepoint; - initialize_fields(JNIHandles::resolve(comp_result_obj)); + initialize_fields(JNIHandles::resolve(compiled_code_obj)); initialize_buffer(buffer); process_exception_handlers(); } int stack_slots = _total_frame_size / HeapWordSize; // conversion to words - GrowableArray<jlong>* leaf_graph_ids = get_leaf_graph_ids(comp_result); + GrowableArray<jlong>* leaf_graph_ids = get_leaf_graph_ids(compiled_code); - if (_stubName != NULL) { - char* name = strdup(java_lang_String::as_utf8_string(_stubName)); + if (compiled_code->is_a(HotSpotCompiledRuntimeStub::klass())) { + oop stubName = HotSpotCompiledRuntimeStub::stubName(compiled_code); + char* name = strdup(java_lang_String::as_utf8_string(stubName)); cb = RuntimeStub::new_runtime_stub(name, &buffer, CodeOffsets::frame_never_safe, @@ -379,33 +379,34 @@ result = GraalEnv::ok; } else { nmethod* nm = NULL; - methodHandle method = getMethodFromHotSpotMethod(HotSpotCompilationResult::method(comp_result)); + methodHandle method = getMethodFromHotSpotMethod(HotSpotCompiledNmethod::method(compiled_code)); + jint entry_bci = HotSpotCompiledNmethod::entryBCI(compiled_code); result = GraalEnv::register_method(method, nm, entry_bci, &_offsets, _custom_stack_area_offset, &buffer, stack_slots, _debug_recorder->_oopmaps, &_exception_handler_table, GraalCompiler::instance(), _debug_recorder, _dependencies, NULL, -1, false, leaf_graph_ids, installed_code, triggered_deoptimizations); cb = nm; } } -void CodeInstaller::initialize_fields(oop comp_result) { - _comp_result = HotSpotCompilationResult::comp(comp_result); - oop hotspotJavaMethod = HotSpotCompilationResult::method(comp_result); - if (hotspotJavaMethod != NULL) { +void CodeInstaller::initialize_fields(oop compiled_code) { + oop comp_result = HotSpotCompiledCode::comp(compiled_code); + if
(compiled_code->is_a(HotSpotCompiledNmethod::klass())) { + oop hotspotJavaMethod = HotSpotCompiledNmethod::method(compiled_code); methodHandle method = getMethodFromHotSpotMethod(hotspotJavaMethod); _parameter_count = method->size_of_parameters(); TRACE_graal_1("installing code for %s", method->name_and_sig_as_C_string()); } else { + assert(compiled_code->is_a(HotSpotCompiledRuntimeStub::klass()), "CCE"); // TODO (ds) not sure if this is correct - only used in OopMap constructor for non-product builds _parameter_count = 0; } - _stubName = HotSpotCompilationResult::stubName(comp_result); - _sites = (arrayOop) HotSpotCompilationResult::sites(comp_result); - _exception_handlers = (arrayOop) HotSpotCompilationResult::exceptionHandlers(comp_result); + _sites = (arrayOop) HotSpotCompiledCode::sites(compiled_code); + _exception_handlers = (arrayOop) HotSpotCompiledCode::exceptionHandlers(compiled_code); - _code = (arrayOop) CompilationResult::targetCode(_comp_result); - _code_size = CompilationResult::targetCodeSize(_comp_result); + _code = (arrayOop) CompilationResult::targetCode(comp_result); + _code_size = CompilationResult::targetCodeSize(comp_result); // The frame size we get from the target method does not include the return address, so add one word for it here. - _total_frame_size = CompilationResult::frameSize(_comp_result) + HeapWordSize; - _custom_stack_area_offset = CompilationResult::customStackAreaOffset(_comp_result); + _total_frame_size = CompilationResult::frameSize(comp_result) + HeapWordSize; + _custom_stack_area_offset = CompilationResult::customStackAreaOffset(comp_result); // (very) conservative estimate: each site needs a constant section entry _constants_size = _sites->length() * (BytesPerLong*2); diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/graal/graalCodeInstaller.hpp --- a/src/share/vm/graal/graalCodeInstaller.hpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/graal/graalCodeInstaller.hpp Tue May 14 10:18:31 2013 +0200 @@ -50,8 +50,6 @@ Arena _arena; - oop _comp_result; - oop _stubName; arrayOop _sites; arrayOop _exception_handlers; CodeOffsets _offsets; diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/graal/graalCompilerToGPU.cpp --- a/src/share/vm/graal/graalCompilerToGPU.cpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/graal/graalCompilerToGPU.cpp Tue May 14 10:18:31 2013 +0200 @@ -102,7 +102,7 @@ #define HS_RESOLVED_JAVA_TYPE "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaType;" #define HS_RESOLVED_METHOD "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod;" #define HS_RESOLVED_FIELD "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaField;" -#define HS_COMP_RESULT "Lcom/oracle/graal/hotspot/HotSpotCompilationResult;" +#define HS_COMPILED_CODE "Lcom/oracle/graal/hotspot/HotSpotCompiledCode;" #define HS_CONFIG "Lcom/oracle/graal/hotspot/HotSpotVMConfig;" #define HS_METHOD "Lcom/oracle/graal/hotspot/meta/HotSpotMethod;" #define HS_INSTALLED_CODE "Lcom/oracle/graal/hotspot/meta/HotSpotInstalledCode;" diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/graal/graalCompilerToVM.cpp --- a/src/share/vm/graal/graalCompilerToVM.cpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/graal/graalCompilerToVM.cpp Tue May 14 10:18:31 2013 +0200 @@ -867,16 +867,16 @@ C2V_END -C2V_VMENTRY(jint, installCode0, (JNIEnv *jniEnv, jobject, jobject compResult, jobject installed_code, jobject triggered_deoptimizations)) +C2V_VMENTRY(jint, installCode0, (JNIEnv *jniEnv, jobject, jobject compiled_code, jobject installed_code, jobject triggered_deoptimizations)) 
ResourceMark rm; HandleMark hm; - Handle compResultHandle = JNIHandles::resolve(compResult); + Handle compiled_code_handle = JNIHandles::resolve(compiled_code); CodeBlob* cb = NULL; Handle installed_code_handle = JNIHandles::resolve(installed_code); Handle triggered_deoptimizations_handle = JNIHandles::resolve(triggered_deoptimizations); GraalEnv::CodeInstallResult result; - CodeInstaller installer(compResultHandle, result, cb, installed_code_handle, triggered_deoptimizations_handle); + CodeInstaller installer(compiled_code_handle, result, cb, installed_code_handle, triggered_deoptimizations_handle); if (PrintCodeCacheOnCompilation) { stringStream s; @@ -895,7 +895,6 @@ if (!installed_code_handle.is_null()) { assert(installed_code_handle->is_a(HotSpotInstalledCode::klass()), "wrong type"); HotSpotInstalledCode::set_codeBlob(installed_code_handle, (jlong) cb); - HotSpotInstalledCode::set_method(installed_code_handle, HotSpotCompilationResult::method(compResult)); HotSpotInstalledCode::set_start(installed_code_handle, (jlong) cb->code_begin()); nmethod* nm = cb->as_nmethod_or_null(); assert(nm == NULL || !installed_code_handle->is_scavengable() || nm->on_scavenge_root_list(), "nm should be scavengable if installed_code is scavengable"); @@ -1158,7 +1157,7 @@ #define HS_RESOLVED_JAVA_TYPE "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaType;" #define HS_RESOLVED_METHOD "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod;" #define HS_RESOLVED_FIELD "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaField;" -#define HS_COMP_RESULT "Lcom/oracle/graal/hotspot/HotSpotCompilationResult;" +#define HS_COMPILED_CODE "Lcom/oracle/graal/hotspot/HotSpotCompiledCode;" #define HS_CONFIG "Lcom/oracle/graal/hotspot/HotSpotVMConfig;" #define HS_METHOD "Lcom/oracle/graal/hotspot/meta/HotSpotMethod;" #define HS_INSTALLED_CODE "Lcom/oracle/graal/hotspot/meta/HotSpotInstalledCode;" @@ -1201,7 +1200,7 @@ {CC"getMetaspaceConstructor", CC"("REFLECT_CONSTRUCTOR"["HS_RESOLVED_TYPE")"METASPACE_METHOD, FN_PTR(getMetaspaceConstructor)}, {CC"getJavaField", CC"("REFLECT_FIELD")"HS_RESOLVED_FIELD, FN_PTR(getJavaField)}, {CC"initializeConfiguration", CC"("HS_CONFIG")V", FN_PTR(initializeConfiguration)}, - {CC"installCode0", CC"("HS_COMP_RESULT HS_INSTALLED_CODE"[Z)I", FN_PTR(installCode0)}, + {CC"installCode0", CC"("HS_COMPILED_CODE HS_INSTALLED_CODE"[Z)I", FN_PTR(installCode0)}, {CC"getCode", CC"(J)[B", FN_PTR(getCode)}, {CC"disassembleCodeBlob", CC"(J)"STRING, FN_PTR(disassembleCodeBlob)}, {CC"executeCompiledMethodVarargs", CC"(["OBJECT NMETHOD")"OBJECT, FN_PTR(executeCompiledMethodVarargs)}, diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/graal/graalEnv.cpp --- a/src/share/vm/graal/graalEnv.cpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/graal/graalEnv.cpp Tue May 14 10:18:31 2013 +0200 @@ -526,7 +526,7 @@ // (Put nm into the task handle *before* publishing to the Java heap.) 
if (task != NULL) task->set_code(nm); - if (HotSpotInstalledCode::isDefault(installed_code())) { + if (HotSpotNmethod::isDefault(installed_code())) { if (entry_bci == InvocationEntryBci) { if (TieredCompilation) { // If there is an old version we're done with it diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/graal/graalJavaAccess.cpp --- a/src/share/vm/graal/graalJavaAccess.cpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/graal/graalJavaAccess.cpp Tue May 14 10:18:31 2013 +0200 @@ -39,8 +39,7 @@ fieldDescriptor fd; if (!ik->find_field(name_symbol, signature_symbol, &fd)) { ResourceMark rm; - tty->print_cr("Invalid layout of %s at %s", name_symbol->as_C_string(), ik->external_name()); - fatal("Invalid layout of preloaded class"); + fatal(err_msg("Invalid layout of %s at %s", name_symbol->as_C_string(), ik->external_name())); } guarantee(fd.is_static() == static_field, "static/instance mismatch"); dest_offset = fd.offset(); diff -r f44d7e24cebd -r cf0e31151830 src/share/vm/graal/graalJavaAccess.hpp --- a/src/share/vm/graal/graalJavaAccess.hpp Tue May 14 10:17:06 2013 +0200 +++ b/src/share/vm/graal/graalJavaAccess.hpp Tue May 14 10:18:31 2013 +0200 @@ -78,17 +78,22 @@ end_class \ start_class(HotSpotInstalledCode) \ long_field(HotSpotInstalledCode, codeBlob) \ - oop_field(HotSpotInstalledCode, method, "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod;") \ long_field(HotSpotInstalledCode, start) \ - boolean_field(HotSpotInstalledCode, isDefault) \ + end_class \ + start_class(HotSpotNmethod) \ + boolean_field(HotSpotNmethod, isDefault) \ end_class \ - start_class(HotSpotCompilationResult) \ - oop_field(HotSpotCompilationResult, comp, "Lcom/oracle/graal/api/code/CompilationResult;") \ - oop_field(HotSpotCompilationResult, method, "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod;") \ - oop_field(HotSpotCompilationResult, stubName, "Ljava/lang/String;") \ - int_field(HotSpotCompilationResult, entryBCI) \ - oop_field(HotSpotCompilationResult, sites, "[Lcom/oracle/graal/api/code/CompilationResult$Site;") \ - oop_field(HotSpotCompilationResult, exceptionHandlers, "[Lcom/oracle/graal/api/code/CompilationResult$ExceptionHandler;") \ + start_class(HotSpotCompiledCode) \ + oop_field(HotSpotCompiledCode, comp, "Lcom/oracle/graal/api/code/CompilationResult;") \ + oop_field(HotSpotCompiledCode, sites, "[Lcom/oracle/graal/api/code/CompilationResult$Site;") \ + oop_field(HotSpotCompiledCode, exceptionHandlers, "[Lcom/oracle/graal/api/code/CompilationResult$ExceptionHandler;") \ + end_class \ + start_class(HotSpotCompiledNmethod) \ + oop_field(HotSpotCompiledNmethod, method, "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod;") \ + int_field(HotSpotCompiledNmethod, entryBCI) \ + end_class \ + start_class(HotSpotCompiledRuntimeStub) \ + oop_field(HotSpotCompiledRuntimeStub, stubName, "Ljava/lang/String;") \ end_class \ start_class(HotSpotRuntimeCallTarget) \ long_field(HotSpotRuntimeCallTarget, address) \