changeset 5844:a432e6d43aa1

fixed bugs related to -G:+InlineVTableStubs and re-enabled it by default
author Doug Simon <doug.simon@oracle.com>
date Tue, 17 Jul 2012 11:39:50 +0200
parents f0837ce4d948
children da0eff406c2c aba97dd72b70 be5ca9960104
files graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalOptions.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/target/amd64/AMD64DirectCallOp.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/target/amd64/AMD64IndirectCallOp.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/target/amd64/HotSpotAMD64Backend.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Call.java src/share/vm/runtime/sharedRuntime.cpp
diffstat 6 files changed, 217 insertions(+), 79 deletions(-)
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalOptions.java	Mon Jul 16 22:09:21 2012 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalOptions.java	Tue Jul 17 11:39:50 2012 +0200
@@ -181,7 +181,7 @@
     public static boolean GenSafepoints                      = true;
     public static boolean GenLoopSafepoints                  = true;
     public static boolean UseTypeCheckHints                  = true;
-    public static boolean InlineVTableStubs                  = false;
+    public static boolean InlineVTableStubs                  = true;
     public static boolean AlwaysInlineVTableStubs            = false;
 
     public static boolean GenAssertionCode                   = false;
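
Note on the hunk above: these fields are plain static booleans that were flipped from the command line with flags of the form -G:+Name / -G:-Name (hence -G:+InlineVTableStubs in the commit message), so changing the default here is what "re-enabled it by default" refers to. A hypothetical sketch of how such a flag could map onto these fields — the actual Graal option parser is not part of this changeset:

    import java.lang.reflect.Field;

    import com.oracle.graal.compiler.GraalOptions;

    final class OptionFlagSketch {
        /** Applies a flag like "-G:+InlineVTableStubs" or "-G:-InlineVTableStubs". */
        static void apply(String flag) throws ReflectiveOperationException {
            String body = flag.substring("-G:".length());
            boolean value = body.charAt(0) == '+';      // '+' enables, '-' disables
            Field f = GraalOptions.class.getField(body.substring(1));
            f.setBoolean(null, value);                  // public static field, no setAccessible needed
        }

        public static void main(String[] args) throws Exception {
            apply("-G:+InlineVTableStubs");
            System.out.println(GraalOptions.InlineVTableStubs); // true
        }
    }
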
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/target/amd64/AMD64DirectCallOp.java	Tue Jul 17 11:39:50 2012 +0200
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.target.amd64;
+
+import static com.oracle.graal.hotspot.meta.HotSpotXirGenerator.*;
+import static com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind.*;
+
+import com.oracle.graal.api.code.CompilationResult.Mark;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.*;
+import com.oracle.graal.lir.amd64.*;
+import com.oracle.graal.lir.amd64.AMD64Call.DirectCallOp;
+import com.oracle.graal.lir.asm.*;
+import com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind;
+import com.oracle.max.asm.*;
+import com.oracle.max.asm.target.amd64.*;
+
+/**
+ * A direct call that complies with the conventions for such calls in HotSpot.
+ * In particular, for calls using an inline cache, a MOVE instruction is
+ * emitted just prior to the aligned direct call. This instruction
+ * (which moves null into RAX) is patched by the C++ Graal code to replace the
+ * null constant with Universe::non_oop_word(), a special sentinel
+ * used for the initial value of the klassOop in an inline cache.
+ * <p>
+ * For non-inline cache calls, a static call stub is emitted.
+ */
+@Opcode("CALL_DIRECT")
+final class AMD64DirectCallOp extends DirectCallOp {
+
+    /**
+     * The mark emitted at the position of the direct call instruction.
+     * This is only recorded for calls that have an associated static
+     * call stub (i.e., {@code invokeKind == Static || invokeKind == Special}).
+     */
+    Mark callsiteMark;
+
+    private final InvokeKind invokeKind;
+
+    AMD64DirectCallOp(Object targetMethod, Value result, Value[] parameters, LIRFrameState state, InvokeKind invokeKind, LIR lir) {
+        super(targetMethod, result, parameters, state, null);
+        this.invokeKind = invokeKind;
+
+        if (invokeKind == Static || invokeKind == Special) {
+            lir.stubs.add(new AMD64Code() {
+                public String description() {
+                    return "static call stub for Invoke" + AMD64DirectCallOp.this.invokeKind;
+                }
+                @Override
+                public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
+                    assert callsiteMark != null : "static call site has not yet been emitted";
+                    tasm.recordMark(MARK_STATIC_CALL_STUB, callsiteMark);
+                    masm.movq(AMD64.rbx, 0L);
+                    Label dummy = new Label();
+                    masm.jmp(dummy);
+                    masm.bind(dummy);
+                }
+            });
+        }
+
+    }
+
+    @Override
+    public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
+        if (invokeKind == Static || invokeKind == Special) {
+            tasm.recordMark(invokeKind == Static ? MARK_INVOKESTATIC : MARK_INVOKESPECIAL);
+        } else {
+            assert invokeKind == Virtual || invokeKind == Interface;
+            // The mark for an invocation that uses an inline cache must be placed at the instruction
+            // that loads the klassOop from the inline cache so that the C++ code can find it
+            // and replace the inlined null constant with Universe::non_oop_word()
+            tasm.recordMark(invokeKind == Virtual ? MARK_INVOKEVIRTUAL : MARK_INVOKEINTERFACE);
+            AMD64Move.move(tasm, masm, AMD64.rax.asValue(Kind.Object), Constant.NULL_OBJECT);
+        }
+
+        emitAlignmentForDirectCall(tasm, masm);
+
+        if (invokeKind == Static || invokeKind == Special) {
+            callsiteMark = tasm.recordMark(null);
+        }
+
+        AMD64Call.directCall(tasm, masm, targetMethod, state);
+    }
+}
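
To make the patching handshake concrete: the MOVE emitted above is, on this convention, a movq rax, imm64 whose 8-byte immediate starts out as null and is later overwritten by the VM. The sketch below is illustrative only — the instruction encoding (REX.W prefix 0x48, opcode 0xB8) and the use of -1 as a stand-in for Universe::non_oop_word() are assumptions, not part of this changeset:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public final class InlineCachePatchSketch {
        public static void main(String[] args) {
            ByteBuffer code = ByteBuffer.allocate(10).order(ByteOrder.LITTLE_ENDIAN);
            int markPos = code.position();          // recordMark points at the start of the MOVE
            code.put((byte) 0x48).put((byte) 0xB8); // movq rax, imm64 (assumed encoding)
            code.putLong(0L);                       // the null constant the compiler emits
            int immPos = markPos + 2;               // immediate follows the two opcode bytes

            long nonOopWord = -1L;                  // stand-in for Universe::non_oop_word()
            code.putLong(immPos, nonOopWord);       // what the VM's patching amounts to
            System.out.println(code.getLong(immPos) == nonOopWord); // true
        }
    }
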
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/target/amd64/AMD64IndirectCallOp.java	Tue Jul 17 11:39:50 2012 +0200
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.target.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.hotspot.meta.HotSpotXirGenerator.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.*;
+import com.oracle.graal.lir.amd64.*;
+import com.oracle.graal.lir.amd64.AMD64Call.IndirectCallOp;
+import com.oracle.graal.lir.asm.*;
+import com.oracle.max.asm.target.amd64.*;
+
+/**
+ * A register indirect call that complies with the extra conventions for such calls in HotSpot.
+ * In particular, the methodOop of the callee must be in RBX for the case where a vtable entry's
+ * _from_compiled_entry is the address of a C2I adapter. Such adapters expect the target
+ * method to be in RBX.
+ */
+@Opcode("CALL_INDIRECT")
+final class AMD64IndirectCallOp extends IndirectCallOp {
+
+    /**
+     * Vtable stubs expect the methodOop in RBX.
+     */
+    public static final Register METHOD_OOP = AMD64.rbx;
+
+    @Use({REG}) protected Value methodOop;
+
+    AMD64IndirectCallOp(Object targetMethod, Value result, Value[] parameters, Value methodOop, Value targetAddress, LIRFrameState state) {
+        super(targetMethod, result, parameters, targetAddress, state, null);
+        this.methodOop = methodOop;
+    }
+
+    @Override
+    public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
+        tasm.recordMark(MARK_INLINE_INVOKEVIRTUAL);
+        Register callReg = asRegister(targetAddress);
+        assert callReg != METHOD_OOP;
+        AMD64Call.indirectCall(tasm, masm, callReg, targetMethod, state);
+    }
+
+    @Override
+    protected void verify() {
+        super.verify();
+        assert asRegister(methodOop) == METHOD_OOP;
+    }
+}
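
A design detail worth calling out in AMD64IndirectCallOp: RBX is not loaded inside emitCode. Instead, the methodOop is modeled as an ordinary @Use operand so the register allocator keeps it live up to the call, and the constraints are checked after allocation — verify() for the RBX assignment, and the assert in emitCode to ensure the call target does not clobber RBX. A toy restatement of those two checks, with hypothetical names (this is not the Graal LIR API):

    final class IndirectCallConstraintsSketch {
        static final String METHOD_OOP_REG = "rbx"; // register c2i adapters read the callee from

        static void verifyOperands(String methodOopReg, String callAddressReg) {
            assert methodOopReg.equals(METHOD_OOP_REG) : "methodOop must be in rbx";
            assert !callAddressReg.equals(METHOD_OOP_REG) : "call target must not clobber rbx";
        }

        public static void main(String[] args) {
            verifyOperands("rbx", "rax"); // passes when run with -ea
        }
    }
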
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/target/amd64/HotSpotAMD64Backend.java	Mon Jul 16 22:09:21 2012 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/target/amd64/HotSpotAMD64Backend.java	Tue Jul 17 11:39:50 2012 +0200
@@ -32,7 +32,6 @@
 import java.util.*;
 
 import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.code.CompilationResult.Mark;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.compiler.*;
 import com.oracle.graal.compiler.gen.*;
@@ -45,7 +44,6 @@
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
-import com.oracle.graal.lir.asm.TargetMethodAssembler.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.java.*;
 import com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind;
@@ -54,6 +52,9 @@
 import com.oracle.max.asm.target.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.max.cri.xir.*;
 
+/**
+ * HotSpot AMD64 specific backend.
+ */
 public class HotSpotAMD64Backend extends Backend {
 
     public HotSpotAMD64Backend(CodeCacheProvider runtime, TargetDescription target) {
@@ -113,78 +114,36 @@
             CallingConvention cc = frameMap.registerConfig.getCallingConvention(JavaCall, signature, target(), false);
             frameMap.callsMethod(cc, JavaCall);
 
-            Value address = Constant.forLong(0L);
-
             ValueNode methodOopNode = null;
-
+            boolean inlineVirtualCall = false;
             if (callTarget.computedAddress() != null) {
                 // If a virtual dispatch address was computed, then an extra argument
                 // was appended for passing the methodOop in RBX
                 methodOopNode = callTarget.arguments().remove(callTarget.arguments().size() - 1);
 
                 if (invokeKind == Virtual) {
-                    address = operand(callTarget.computedAddress());
+                    inlineVirtualCall = true;
                 } else {
                     // An invokevirtual may have been canonicalized into an invokespecial;
                     // the methodOop argument is ignored in this case
+                    methodOopNode = null;
                 }
             }
 
             List<Value> argList = visitInvokeArguments(cc, callTarget.arguments());
-
-            if (methodOopNode != null) {
-                Value methodOopArg = operand(methodOopNode);
-                emitMove(methodOopArg, AMD64.rbx.asValue());
-                argList.add(methodOopArg);
-            }
-
-            final Mark[] callsiteForStaticCallStub = {null};
-            if (invokeKind == Static || invokeKind == Special) {
-                lir.stubs.add(new AMD64Code() {
-                    public String description() {
-                        return "static call stub for Invoke" + invokeKind;
-                    }
-                    @Override
-                    public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
-                        assert callsiteForStaticCallStub[0] != null;
-                        tasm.recordMark(MARK_STATIC_CALL_STUB, callsiteForStaticCallStub);
-                        masm.movq(AMD64.rbx, 0L);
-                        Label dummy = new Label();
-                        masm.jmp(dummy);
-                        masm.bind(dummy);
-                    }
-                });
-            }
-
-            CallPositionListener cpl = new CallPositionListener() {
-                @Override
-                public void beforeCall(TargetMethodAssembler tasm) {
-                    if (invokeKind == Static || invokeKind == Special) {
-                        tasm.recordMark(invokeKind == Static ? MARK_INVOKESTATIC : MARK_INVOKESPECIAL);
-                    } else {
-                        // The mark for an invocation that uses an inline cache must be placed at the instruction
-                        // that loads the klassOop from the inline cache so that the C++ code can find it
-                        // and replace the inline null value with Universe::non_oop_word()
-                        assert invokeKind == Virtual || invokeKind == Interface;
-                        if (invokeKind == Virtual && callTarget.computedAddress() != null) {
-                            tasm.recordMark(MARK_INLINE_INVOKEVIRTUAL);
-                        } else {
-                            tasm.recordMark(invokeKind == Virtual ? MARK_INVOKEVIRTUAL : MARK_INVOKEINTERFACE);
-                            AMD64MacroAssembler masm = (AMD64MacroAssembler) tasm.asm;
-                            AMD64Move.move(tasm, masm, AMD64.rax.asValue(Kind.Object), Constant.NULL_OBJECT);
-                        }
-                    }
-                }
-                public void atCall(TargetMethodAssembler tasm) {
-                    if (invokeKind == Static || invokeKind == Special) {
-                        callsiteForStaticCallStub[0] = tasm.recordMark(null);
-                    }
-                }
-            };
+            Value[] parameters = argList.toArray(new Value[argList.size()]);
 
             LIRFrameState callState = stateFor(x.stateDuring(), null, x instanceof InvokeWithExceptionNode ? getLIRBlock(((InvokeWithExceptionNode) x).exceptionEdge()) : null, x.leafGraphId());
             Value result = resultOperandFor(x.node().kind());
-            emitCall(callTarget.targetMethod(), result, argList, address, callState, cpl);
+            if (!inlineVirtualCall) {
+                assert methodOopNode == null;
+                append(new AMD64DirectCallOp(callTarget.targetMethod(), result, parameters, callState, invokeKind, lir));
+            } else {
+                assert methodOopNode != null;
+                Value methodOop = AMD64.rbx.asValue();
+                emitMove(operand(methodOopNode), methodOop);
+                append(new AMD64IndirectCallOp(callTarget.targetMethod(), result, parameters, methodOop, operand(callTarget.computedAddress()), callState));
+            }
 
             if (isLegal(result)) {
                 setResult(x.node(), emitMove(result));
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Call.java	Mon Jul 16 22:09:21 2012 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Call.java	Tue Jul 17 11:39:50 2012 +0200
@@ -54,9 +54,28 @@
 
         @Override
         public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
-            callAlignment(tasm, masm, callPositionListener);
+            if (callPositionListener != null) {
+                callPositionListener.beforeCall(tasm);
+            }
+
+            emitAlignmentForDirectCall(tasm, masm);
+
+            if (callPositionListener != null) {
+                int pos = masm.codeBuffer.position();
+                callPositionListener.atCall(tasm);
+                assert pos == masm.codeBuffer.position() : "call position listener inserted code before an aligned call";
+            }
             directCall(tasm, masm, targetMethod, state);
         }
+
+        protected void emitAlignmentForDirectCall(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
+            // make sure that the displacement word of the call ends up word aligned
+            int offset = masm.codeBuffer.position();
+            offset += tasm.target.arch.machineCodeCallDisplacementOffset;
+            while (offset++ % tasm.target.wordSize != 0) {
+                masm.nop();
+            }
+        }
     }
 
     @Opcode("CALL_INDIRECT")
@@ -66,7 +85,7 @@
         @Use({REG}) protected Value targetAddress;
         @State protected LIRFrameState state;
 
-        private final Object targetMethod;
+        protected final Object targetMethod;
         protected final CallPositionListener callPositionListener;
 
         public IndirectCallOp(Object targetMethod, Value result, Value[] parameters, Value targetAddress, LIRFrameState state, CallPositionListener callPositionListener) {
@@ -88,25 +107,6 @@
         }
     }
 
-    public static void callAlignment(TargetMethodAssembler tasm, AMD64MacroAssembler masm, CallPositionListener callPositionListener) {
-        if (callPositionListener != null) {
-            callPositionListener.beforeCall(tasm);
-        }
-
-        // make sure that the displacement word of the call ends up word aligned
-        int offset = masm.codeBuffer.position();
-        offset += tasm.target.arch.machineCodeCallDisplacementOffset;
-        while (offset++ % tasm.target.wordSize != 0) {
-            masm.nop();
-        }
-
-        if (callPositionListener != null) {
-            int pos = masm.codeBuffer.position();
-            callPositionListener.atCall(tasm);
-            assert pos == masm.codeBuffer.position() : "call position listener inserted code before an aligned call";
-        }
-    }
-
     public static void directCall(TargetMethodAssembler tasm, AMD64MacroAssembler masm, Object target, LIRFrameState info) {
         int before = masm.codeBuffer.position();
         if (target instanceof RuntimeCall) {
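
The padding rule in emitAlignmentForDirectCall above is easy to check in isolation. In the sketch below, the displacement offset of 1 (a direct call is 0xE8 followed by a 4-byte displacement) and the word size of 8 are assumed AMD64 values for machineCodeCallDisplacementOffset and wordSize; the loop mirrors the one in the diff:

    final class CallAlignmentSketch {
        static final int DISPLACEMENT_OFFSET = 1; // call == 0xE8 + rel32, so the
                                                  // displacement starts 1 byte in
        static final int WORD_SIZE = 8;

        /** NOPs needed so the call's displacement starts on a word boundary. */
        static int nopsBefore(int position) {
            int offset = position + DISPLACEMENT_OFFSET;
            int nops = 0;
            while (offset++ % WORD_SIZE != 0) { // same loop shape as the diff
                nops++;                         // each 1-byte NOP shifts the call by 1
            }
            return nops;
        }

        public static void main(String[] args) {
            // A call at position 13: its displacement would start at 14; two NOPs
            // move the call to 15 and the displacement to 16 (word aligned).
            System.out.println(nopsBefore(13)); // 2
        }
    }
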
--- a/src/share/vm/runtime/sharedRuntime.cpp	Mon Jul 16 22:09:21 2012 +0200
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Tue Jul 17 11:39:50 2012 +0200
@@ -1697,6 +1697,8 @@
 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
   methodOop moop(method);
 
+  assert(moop->is_oop(false) && moop->is_method(), "method oop from call site is invalid");
+
   address entry_point = moop->from_compiled_entry();
 
   // It's possible that deoptimization can occur at a call site which hasn't