changeset 7125:1baf7f1e3f23

decoupled C++ Graal runtime from C1
author Doug Simon <doug.simon@oracle.com>
date Mon, 03 Dec 2012 15:32:17 +0100
parents ab65fa23f8e9
children ce248dc0a656
files graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java graal/com.oracle.graal.interpreter/src/com/oracle/graal/interpreter/BytecodeInterpreter.java make/bsd/makefiles/graal.make make/bsd/makefiles/vm.make make/linux/makefiles/graal.make make/linux/makefiles/vm.make make/solaris/makefiles/graal.make make/solaris/makefiles/vm.make src/cpu/x86/vm/c1_Runtime1_x86.cpp src/cpu/x86/vm/graalGlobals_x86.hpp src/cpu/x86/vm/graalRuntime_x86.cpp src/cpu/x86/vm/graalStubAssembler_x86.cpp src/cpu/x86/vm/sharedRuntime_x86_32.cpp src/share/vm/c1/c1_globals.hpp src/share/vm/code/codeBlob.cpp src/share/vm/code/codeBlob.hpp src/share/vm/code/compiledIC.cpp src/share/vm/code/nmethod.cpp src/share/vm/code/nmethod.hpp src/share/vm/compiler/abstractCompiler.hpp src/share/vm/compiler/compileBroker.cpp src/share/vm/compiler/oopMap.cpp src/share/vm/graal/graalCodeInstaller.cpp src/share/vm/graal/graalCompiler.cpp src/share/vm/graal/graalCompiler.hpp src/share/vm/graal/graalCompilerToVM.cpp src/share/vm/graal/graalEnv.cpp src/share/vm/graal/graalGlobals.cpp src/share/vm/graal/graalGlobals.hpp src/share/vm/graal/graalRuntime.cpp src/share/vm/graal/graalRuntime.hpp src/share/vm/interpreter/interpreter.cpp src/share/vm/memory/allocation.hpp src/share/vm/oops/klass.hpp src/share/vm/precompiled/precompiled.hpp src/share/vm/prims/jvm.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/arguments.hpp src/share/vm/runtime/compilationPolicy.cpp src/share/vm/runtime/globals.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/globals_extension.hpp src/share/vm/runtime/safepoint.cpp src/share/vm/runtime/sharedRuntime.cpp src/share/vm/runtime/thread.cpp src/share/vm/utilities/globalDefinitions.hpp src/share/vm/utilities/top.hpp
diffstat 48 files changed, 2349 insertions(+), 115 deletions(-)
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java	Mon Dec 03 13:56:13 2012 +0100
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java	Mon Dec 03 15:32:17 2012 +0100
@@ -72,13 +72,13 @@
                 /* arg0:         a */ arg(0, Kind.Double),
                 /* arg1:         b */ arg(1, Kind.Double));
 
-        addRuntimeCall(MONITORENTER, config.fastMonitorEnterStub,
+        addRuntimeCall(MONITORENTER, config.monitorEnterStub,
                 /*        temps */ new Register[] {rax, rbx},
                 /*          ret */ ret(Kind.Void),
                 /* arg0: object */ arg(0, Kind.Object),
                 /* arg1:   lock */ arg(1, word));
 
-        addRuntimeCall(MONITOREXIT, c.fastMonitorExitStub,
+        addRuntimeCall(MONITOREXIT, c.monitorExitStub,
                 /*        temps */ new Register[] {rax, rbx},
                 /*          ret */ ret(Kind.Void),
                 /* arg0: object */ arg(0, Kind.Object),
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Mon Dec 03 13:56:13 2012 +0100
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Mon Dec 03 15:32:17 2012 +0100
@@ -24,7 +24,7 @@
 
 
 /**
- * Used to communicate configuration details, runtime offsets, etc. to graal upon compileMethod.
+ * Used to communicate configuration details, runtime offsets, etc. to Graal upon compileMethod.
  */
 public final class HotSpotVMConfig extends CompilerObject {
 
@@ -245,8 +245,8 @@
     public long inlineCacheMissStub;
     public long handleExceptionStub;
     public long handleDeoptStub;
-    public long fastMonitorEnterStub;
-    public long fastMonitorExitStub;
+    public long monitorEnterStub;
+    public long monitorExitStub;
     public long verifyOopStub;
     public long vmErrorStub;
     public long deoptimizeStub;
--- a/graal/com.oracle.graal.interpreter/src/com/oracle/graal/interpreter/BytecodeInterpreter.java	Mon Dec 03 13:56:13 2012 +0100
+++ b/graal/com.oracle.graal.interpreter/src/com/oracle/graal/interpreter/BytecodeInterpreter.java	Mon Dec 03 15:32:17 2012 +0100
@@ -65,11 +65,11 @@
         GraalRuntime runtime = Graal.getRuntime();
         this.runtimeInterface = runtime.getCapability(RuntimeInterpreterInterface.class);
         if (this.runtimeInterface == null) {
-            throw new UnsupportedOperationException("The provided graal runtime does not support the required capability " + RuntimeInterpreterInterface.class.getName() + ".");
+            throw new UnsupportedOperationException("The provided Graal runtime does not support the required capability " + RuntimeInterpreterInterface.class.getName() + ".");
         }
         this.metaAccessProvider = runtime.getCapability(MetaAccessProvider.class);
         if (this.metaAccessProvider == null) {
-            throw new UnsupportedOperationException("The provided graal runtime does not support the required capability " + MetaAccessProvider.class.getName() + ".");
+            throw new UnsupportedOperationException("The provided Graal runtime does not support the required capability " + MetaAccessProvider.class.getName() + ".");
         }
 
         this.rootMethod = resolveRootMethod();
--- a/make/bsd/makefiles/graal.make	Mon Dec 03 13:56:13 2012 +0100
+++ b/make/bsd/makefiles/graal.make	Mon Dec 03 15:32:17 2012 +0100
@@ -29,4 +29,4 @@
 
 VM_SUBDIR = graal
 
-CFLAGS += -DGRAAL -DCOMPILER1
+CFLAGS += -DGRAAL
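The Graal build configuration now defines only -DGRAAL; it no longer piggybacks on -DCOMPILER1 (the same change is repeated in the linux and solaris graal.make files below). Shared C++ code that previously reached Graal-only paths through a COMPILER1 guard therefore has to test GRAAL explicitly. A minimal, hypothetical illustration of that effect, not code from this changeset:

    // guard_sketch.cpp -- compile with -DGRAAL (new Graal build) or -DCOMPILER1 (C1 build).
    #include <cstdio>

    void init_compiler_runtime() {
    #if defined(GRAAL)
      std::puts("initialize GraalRuntime stubs");   // graalRuntime path
    #elif defined(COMPILER1)
      std::puts("initialize C1 Runtime1 stubs");    // c1_Runtime1 path
    #else
      std::puts("no JIT-specific runtime stubs");
    #endif
    }

    int main() { init_compiler_runtime(); return 0; }

Before this changeset a Graal build saw both macros; after it, only the GRAAL branch applies, which is what motivates the Graal copies of the C1 stub machinery added further down.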
--- a/make/bsd/makefiles/vm.make	Mon Dec 03 13:56:13 2012 +0100
+++ b/make/bsd/makefiles/vm.make	Mon Dec 03 15:32:17 2012 +0100
@@ -175,8 +175,6 @@
 
 SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark
 
-GRAAL_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
-GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/c1
 GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/graal)
 GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/graal
 
@@ -194,19 +192,19 @@
 COMPILER1_SPECIFIC_FILES := c1_\*
 SHARK_SPECIFIC_FILES     := shark
 ZERO_SPECIFIC_FILES      := zero
-GRAAL_SPECIFIC_FILES     := graal
+GRAAL_SPECIFIC_FILES     := graal\*
 
 # Always exclude these.
 Src_Files_EXCLUDE += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
 
 # Exclude per type.
-Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES)
-Src_Files_EXCLUDE/GRAAL     := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/GRAAL     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
 
 Src_Files_EXCLUDE +=  $(Src_Files_EXCLUDE/$(TYPE))
 
--- a/make/linux/makefiles/graal.make	Mon Dec 03 13:56:13 2012 +0100
+++ b/make/linux/makefiles/graal.make	Mon Dec 03 15:32:17 2012 +0100
@@ -29,4 +29,4 @@
 
 VM_SUBDIR = graal
 
-CFLAGS += -DGRAAL -DCOMPILER1
+CFLAGS += -DGRAAL
--- a/make/linux/makefiles/vm.make	Mon Dec 03 13:56:13 2012 +0100
+++ b/make/linux/makefiles/vm.make	Mon Dec 03 15:32:17 2012 +0100
@@ -177,8 +177,6 @@
 
 SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark
 
-GRAAL_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
-GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/c1
 GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/graal)
 GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/graal
 
@@ -196,19 +194,19 @@
 COMPILER1_SPECIFIC_FILES := c1_\*
 SHARK_SPECIFIC_FILES     := shark
 ZERO_SPECIFIC_FILES      := zero
-GRAAL_SPECIFIC_FILES     := graal
+GRAAL_SPECIFIC_FILES     := graal\*
 
 # Always exclude these.
 Src_Files_EXCLUDE += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
 
 # Exclude per type.
-Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES)
-Src_Files_EXCLUDE/GRAAL     := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/GRAAL     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
 
 Src_Files_EXCLUDE +=  $(Src_Files_EXCLUDE/$(TYPE))
 
--- a/make/solaris/makefiles/graal.make	Mon Dec 03 13:56:13 2012 +0100
+++ b/make/solaris/makefiles/graal.make	Mon Dec 03 15:32:17 2012 +0100
@@ -29,4 +29,4 @@
 
 VM_SUBDIR = graal
 
-CFLAGS += -DGRAAL -DCOMPILER1
+CFLAGS += -DGRAAL
--- a/make/solaris/makefiles/vm.make	Mon Dec 03 13:56:13 2012 +0100
+++ b/make/solaris/makefiles/vm.make	Mon Dec 03 15:32:17 2012 +0100
@@ -190,8 +190,6 @@
 COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/libadt
 COMPILER2_PATHS +=  $(GENERATED)/adfiles
 
-GRAAL_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
-GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/c1
 GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/graal)
 GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/graal
 
@@ -215,13 +213,13 @@
 Src_Files_EXCLUDE := dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
 
 # Exclude per type.
-Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES)
-Src_Files_EXCLUDE/GRAAL     := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/GRAAL     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
 
 Src_Files_EXCLUDE +=  $(Src_Files_EXCLUDE/$(TYPE))
 
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -1981,7 +1981,7 @@
     case graal_verify_oop_id: {
       // We use enter & leave so that a better stack trace is produced in the hs_err file
       __ enter();
-      __ verify_oop(r13, "graal verify oop");
+      __ verify_oop(r13, "Graal verify oop");
       __ leave();
       __ ret(0);
       break;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/graalGlobals_x86.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_VM_GRAALGLOBALS_X86_HPP
+#define CPU_X86_VM_GRAALGLOBALS_X86_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// Sets the default values for platform dependent flags used by the Graal compiler.
+// (see graalGlobals.hpp)
+
+define_pd_global(bool, BackgroundCompilation,        true );
+define_pd_global(bool, UseTLAB,                      true );
+define_pd_global(bool, ResizeTLAB,                   true );
+define_pd_global(bool, InlineIntrinsics,             true );
+define_pd_global(bool, PreferInterpreterNativeStubs, false);
+define_pd_global(bool, TieredCompilation,            false);
+define_pd_global(intx, BackEdgeThreshold,            100000);
+
+define_pd_global(intx, OnStackReplacePercentage,     933  );
+define_pd_global(intx, FreqInlineSize,               325  );
+define_pd_global(intx, NewSizeThreadIncrease,        4*K  );
+define_pd_global(uintx,MetaspaceSize,                12*M );
+define_pd_global(uintx,MaxPermSize,                  64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
+define_pd_global(bool, CICompileOSR,                 true );
+
+define_pd_global(bool, ProfileTraps,                 true );
+define_pd_global(bool, UseOnStackReplacement,        true);
+define_pd_global(intx, CompileThreshold,             10000 );
+define_pd_global(intx, InitialCodeCacheSize,         16*M  );
+define_pd_global(intx, ReservedCodeCacheSize,        64*M );
+define_pd_global(bool, ProfileInterpreter,           true );
+define_pd_global(intx, CodeCacheExpansionSize,       64*K );
+define_pd_global(uintx,CodeCacheMinBlockLength,      4);
+define_pd_global(intx, TypeProfileWidth,             8);
+
+define_pd_global(bool, RoundFPResults,               true );
+
+define_pd_global(bool, LIRFillDelaySlots,            false);
+define_pd_global(bool, OptimizeSinglePrecision,      true );
+define_pd_global(bool, CSEArrayLength,               false);
+define_pd_global(bool, TwoOperandLIRForm,            true );
+
+define_pd_global(intx, SafepointPollOffset,          256  );
+
+#endif // CPU_X86_VM_GRAALGLOBALS_X86_HPP
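The new graalGlobals_x86.hpp supplies the per-CPU defaults for Graal's flags through define_pd_global, much as c1_globals_x86.hpp does for C1. Below is a self-contained sketch of how that pattern is typically consumed, with the macro inlined so the snippet stands alone; it assumes the usual HotSpot convention that define_pd_global expands to a pd_-prefixed constant which the shared flag declarations (graalGlobals.hpp in this changeset) pick up as their default value:

    // pd_globals_sketch.cpp -- illustrative only, not part of this changeset.
    #include <cstdio>

    typedef long intx;   // stand-in for HotSpot's intx

    // Inlined stand-in for the shared macro: each entry becomes a pd_<name> constant.
    #define define_pd_global(type, name, value) const type pd_##name = value

    // Platform side (what graalGlobals_x86.hpp does above):
    define_pd_global(bool, ProfileInterpreter, true);
    define_pd_global(intx, CompileThreshold,   10000);

    // Shared side: a *_pd flag declaration picks up pd_<name> as its initial value.
    const bool ProfileInterpreter = pd_ProfileInterpreter;
    const intx CompileThreshold   = pd_CompileThreshold;

    int main() {
      std::printf("ProfileInterpreter=%d CompileThreshold=%ld\n",
                  ProfileInterpreter, CompileThreshold);
      return 0;
    }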
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/graalRuntime_x86.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -0,0 +1,1193 @@
+/*
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "graal/graalRuntime.hpp"
+#include "interpreter/interpreter.hpp"
+#include "nativeInst_x86.hpp"
+#include "oops/compiledICHolder.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "register_x86.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/signature.hpp"
+#include "runtime/vframeArray.hpp"
+#include "vmreg_x86.inline.hpp"
+
+static void restore_live_registers(GraalStubAssembler* sasm, bool restore_fpu_registers = true);
+
+// Implementation of GraalStubAssembler
+
+int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
+  // setup registers
+  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
+  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
+  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
+  assert(args_size >= 0, "illegal args_size");
+  bool align_stack = false;
+#ifdef _LP64
+  // At a method handle call, the stack may not be properly aligned
+  // when returning with an exception.
+  align_stack = (stub_id() == false /*GraalRuntime::handle_exception_from_callee_id*/);
+#endif
+
+#ifdef _LP64
+  mov(c_rarg0, thread);
+  set_num_rt_args(0); // Nothing on stack
+#else
+  set_num_rt_args(1 + args_size);
+
+  // push java thread (becomes first argument of C function)
+  get_thread(thread);
+  push(thread);
+#endif // _LP64
+
+  int call_offset;
+  if (!align_stack) {
+    set_last_Java_frame(thread, noreg, rbp, NULL);
+  } else {
+    address the_pc = pc();
+    call_offset = offset();
+    set_last_Java_frame(thread, noreg, rbp, the_pc);
+    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
+  }
+
+  // do the call
+  call(RuntimeAddress(entry));
+  if (!align_stack) {
+    call_offset = offset();
+  }
+  // verify callee-saved register
+#ifdef ASSERT
+  guarantee(thread != rax, "change this code");
+  push(rax);
+  { Label L;
+    get_thread(rax);
+    cmpptr(thread, rax);
+    jcc(Assembler::equal, L);
+    int3();
+    stop("GraalStubAssembler::call_RT: rdi not callee saved?");
+    bind(L);
+  }
+  pop(rax);
+#endif
+  reset_last_Java_frame(thread, true, align_stack);
+
+  // discard thread and arguments
+  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
+
+  // check for pending exceptions
+  { Label L;
+    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+    jcc(Assembler::equal, L);
+    // exception pending => remove activation and forward to exception handler
+    movptr(rax, Address(thread, Thread::pending_exception_offset()));
+    // make sure that the vm_results are cleared
+    if (oop_result1->is_valid()) {
+      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
+    }
+    if (metadata_result->is_valid()) {
+      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+    }
+#ifdef GRAAL
+    // (thomaswue) Deoptimize in case of an exception.
+    restore_live_registers(this, false);
+    movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+    leave();
+    movl(rscratch1, Deoptimization::make_trap_request(Deoptimization::Reason_constraint, Deoptimization::Action_reinterpret));
+    jump(RuntimeAddress(SharedRuntime::deopt_blob()->uncommon_trap()));
+#else
+    if (frame_size() == no_frame_size) {
+      leave();
+      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+    } else if (_stub_id == GraalRuntime::forward_exception_id) {
+      should_not_reach_here();
+    } else {
+      jump(RuntimeAddress(GraalRuntime::entry_for(GraalRuntime::forward_exception_id)));
+    }
+#endif
+    bind(L);
+  }
+  // get oop results if there are any and reset the values in the thread
+  if (oop_result1->is_valid()) {
+    get_vm_result(oop_result1, thread);
+  }
+  if (metadata_result->is_valid()) {
+    get_vm_result_2(metadata_result, thread);
+  }
+  return call_offset;
+}
+
+
+int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
+#ifdef _LP64
+  mov(c_rarg1, arg1);
+#else
+  push(arg1);
+#endif // _LP64
+  return call_RT(oop_result1, metadata_result, entry, 1);
+}
+
+
+int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
+#ifdef _LP64
+  if (c_rarg1 == arg2) {
+    if (c_rarg2 == arg1) {
+      xchgq(arg1, arg2);
+    } else {
+      mov(c_rarg2, arg2);
+      mov(c_rarg1, arg1);
+    }
+  } else {
+    mov(c_rarg1, arg1);
+    mov(c_rarg2, arg2);
+  }
+#else
+  push(arg2);
+  push(arg1);
+#endif // _LP64
+  return call_RT(oop_result1, metadata_result, entry, 2);
+}
+
+
+int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
+#ifdef _LP64
+  // if there is any conflict use the stack
+  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
+      arg2 == c_rarg1 || arg1 == c_rarg3 ||
+      arg3 == c_rarg1 || arg1 == c_rarg2) {
+    push(arg3);
+    push(arg2);
+    push(arg1);
+    pop(c_rarg1);
+    pop(c_rarg2);
+    pop(c_rarg3);
+  } else {
+    mov(c_rarg1, arg1);
+    mov(c_rarg2, arg2);
+    mov(c_rarg3, arg3);
+  }
+#else
+  push(arg3);
+  push(arg2);
+  push(arg1);
+#endif // _LP64
+  return call_RT(oop_result1, metadata_result, entry, 3);
+}
+
+// Implementation of GraalStubFrame
+
+class GraalStubFrame: public StackObj {
+ private:
+  GraalStubAssembler* _sasm;
+
+ public:
+  GraalStubFrame(GraalStubAssembler* sasm, const char* name, bool must_gc_arguments);
+  ~GraalStubFrame();
+};
+
+
+#define __ _sasm->
+
+GraalStubFrame::GraalStubFrame(GraalStubAssembler* sasm, const char* name, bool must_gc_arguments) {
+  _sasm = sasm;
+  __ set_info(name, must_gc_arguments);
+  __ enter();
+}
+
+GraalStubFrame::~GraalStubFrame() {
+  __ leave();
+  __ ret(0);
+}
+
+#undef __
+
+
+// Implementation of GraalRuntime
+
+const int float_regs_as_doubles_size_in_slots = FloatRegisterImpl::number_of_registers * 2;
+const int xmm_regs_as_doubles_size_in_slots = XMMRegisterImpl::number_of_registers * 2;
+
+// Stack layout for saving/restoring  all the registers needed during a runtime
+// call (this includes deoptimization)
+// Note: note that users of this frame may well have arguments to some runtime
+// while these values are on the stack. These positions neglect those arguments
+// but the code in save_live_registers will take the argument count into
+// account.
+//
+#ifdef _LP64
+  #define SLOT2(x) x,
+  #define SLOT_PER_WORD 2
+#else
+  #define SLOT2(x)
+  #define SLOT_PER_WORD 1
+#endif // _LP64
+
+enum reg_save_layout {
+  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
+  // happen and will assert if the stack size we create is misaligned
+#ifdef _LP64
+  align_dummy_0, align_dummy_1,
+#endif // _LP64
+#ifdef _WIN64
+  // Windows always allocates space for it's argument registers (see
+  // frame::arg_reg_save_area_bytes).
+  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
+  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
+  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
+  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
+#endif // _WIN64
+  xmm_regs_as_doubles_off,                                                                  // 32
+  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
+  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
+  // fpu_state_end_off is exclusive
+  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
+  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
+  extra_space_offset,                                                                       // 360
+#ifdef _LP64
+  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
+  r14_off, r14H_off,                                                                        // 368, 372
+  r13_off, r13H_off,                                                                        // 376, 380
+  r12_off, r12H_off,                                                                        // 384, 388
+  r11_off, r11H_off,                                                                        // 392, 396
+  r10_off, r10H_off,                                                                        // 400, 404
+  r9_off, r9H_off,                                                                          // 408, 412
+  r8_off, r8H_off,                                                                          // 416, 420
+  rdi_off, rdiH_off,                                                                        // 424, 428
+#else
+  rdi_off = extra_space_offset,
+#endif // _LP64
+  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
+  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
+  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
+  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
+  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
+  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
+  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
+  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
+  return_off, SLOT2(returnH_off)                                                            // 496, 500
+  reg_save_frame_size   // As noted: neglects any parameters to runtime                     // 504
+};
+
+// Save registers which might be killed by calls into the runtime.
+// Tries to smart about FP registers.  In particular we separate
+// saving and describing the FPU registers for deoptimization since we
+// have to save the FPU registers twice if we describe them and on P4
+// saving FPU registers which don't contain anything appears
+// expensive.  The deopt blob is the only thing which needs to
+// describe FPU registers.  In all other cases it should be sufficient
+// to simply save their current value.
+
+static OopMap* generate_oop_map(GraalStubAssembler* sasm, int num_rt_args,
+                                bool save_fpu_registers = true) {
+
+  // In 64bit all the args are in regs so there are no additional stack slots
+  LP64_ONLY(num_rt_args = 0);
+  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
+  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
+  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
+
+  // record saved value locations in an OopMap
+  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
+  OopMap* map = new OopMap(frame_size_in_slots, 0);
+  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
+#ifdef _LP64
+  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());
+
+  // This is stupid but needed.
+  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());
+
+  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
+#endif // _LP64
+
+  if (save_fpu_registers) {
+    if (UseSSE < 2) {
+      int fpu_off = float_regs_as_doubles_off;
+      for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
+        VMReg fpu_name_0 = as_FloatRegister(n)->as_VMReg();
+        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off +     num_rt_args), fpu_name_0);
+        // %%% This is really a waste but we'll keep things as they were for now
+        if (true) {
+          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
+        }
+        fpu_off += 2;
+      }
+      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
+    }
+
+    if (UseSSE >= 2) {
+      int xmm_off = xmm_regs_as_doubles_off;
+      for (int n = 0; n < XMMRegisterImpl::number_of_registers; n++) {
+        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
+        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
+        // %%% This is really a waste but we'll keep things as they were for now
+        if (true) {
+          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
+        }
+        xmm_off += 2;
+      }
+      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
+
+    } else if (UseSSE == 1) {
+      int xmm_off = xmm_regs_as_doubles_off;
+      for (int n = 0; n < XMMRegisterImpl::number_of_registers; n++) {
+        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
+        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
+        xmm_off += 2;
+      }
+      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
+    }
+  }
+
+  return map;
+}
+
+#define __ sasm->
+
+static OopMap* save_live_registers(GraalStubAssembler* sasm, int num_rt_args,
+                                   bool save_fpu_registers = true) {
+  __ block_comment("save_live_registers");
+
+  __ pusha();         // integer registers
+
+  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
+  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
+
+  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
+
+#ifdef ASSERT
+  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
+#endif
+
+  if (save_fpu_registers) {
+    if (UseSSE < 2) {
+      // save FPU stack
+      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
+      __ fwait();
+
+#ifdef ASSERT
+      Label ok;
+      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
+      __ jccb(Assembler::equal, ok);
+      __ stop("corrupted control word detected");
+      __ bind(ok);
+#endif
+
+      // Reset the control word to guard against exceptions being unmasked
+      // since fstp_d can cause FPU stack underflow exceptions.  Write it
+      // into the on stack copy and then reload that to make sure that the
+      // current and future values are correct.
+      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
+      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
+
+      // Save the FPU registers in de-opt-able form
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
+    }
+
+    if (UseSSE >= 2) {
+      // save XMM registers
+      // XMM registers can contain float or double values, but this is not known here,
+      // so always save them as doubles.
+      // note that float values are _not_ converted automatically, so for float values
+      // the second word contains only garbage data.
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
+#ifdef _LP64
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
+#endif // _LP64
+    } else if (UseSSE == 1) {
+      // save XMM registers as float because double not supported without SSE2
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
+    }
+  }
+
+  // FPU stack must be empty now
+  __ verify_FPU(0, "save_live_registers");
+
+  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
+}
+
+
+static void restore_fpu(GraalStubAssembler* sasm, bool restore_fpu_registers = true) {
+  if (restore_fpu_registers) {
+    if (UseSSE >= 2) {
+      // restore XMM registers
+      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
+      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
+      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
+      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
+      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
+      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
+      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
+      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
+#ifdef _LP64
+      __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
+      __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
+      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
+      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
+      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
+      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
+      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
+      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
+#endif // _LP64
+    } else if (UseSSE == 1) {
+      // restore XMM registers
+      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
+      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
+      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
+      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
+      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
+      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
+      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
+      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
+    }
+
+    if (UseSSE < 2) {
+      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
+    } else {
+      // check that FPU stack is really empty
+      __ verify_FPU(0, "restore_live_registers");
+    }
+
+  } else {
+    // check that FPU stack is really empty
+    __ verify_FPU(0, "restore_live_registers");
+  }
+
+#ifdef ASSERT
+  {
+    Label ok;
+    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
+    __ jcc(Assembler::equal, ok);
+    __ stop("bad offsets in frame");
+    __ bind(ok);
+  }
+#endif // ASSERT
+
+  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
+}
+
+
+static void restore_live_registers(GraalStubAssembler* sasm, bool restore_fpu_registers/* = true*/) {
+  __ block_comment("restore_live_registers");
+
+  restore_fpu(sasm, restore_fpu_registers);
+  __ popa();
+}
+
+
+static void restore_live_registers_except_rax(GraalStubAssembler* sasm, bool restore_fpu_registers = true) {
+  __ block_comment("restore_live_registers_except_rax");
+
+  restore_fpu(sasm, restore_fpu_registers);
+
+#ifdef _LP64
+  __ movptr(r15, Address(rsp, 0));
+  __ movptr(r14, Address(rsp, wordSize));
+  __ movptr(r13, Address(rsp, 2 * wordSize));
+  __ movptr(r12, Address(rsp, 3 * wordSize));
+  __ movptr(r11, Address(rsp, 4 * wordSize));
+  __ movptr(r10, Address(rsp, 5 * wordSize));
+  __ movptr(r9,  Address(rsp, 6 * wordSize));
+  __ movptr(r8,  Address(rsp, 7 * wordSize));
+  __ movptr(rdi, Address(rsp, 8 * wordSize));
+  __ movptr(rsi, Address(rsp, 9 * wordSize));
+  __ movptr(rbp, Address(rsp, 10 * wordSize));
+  // skip rsp
+  __ movptr(rbx, Address(rsp, 12 * wordSize));
+  __ movptr(rdx, Address(rsp, 13 * wordSize));
+  __ movptr(rcx, Address(rsp, 14 * wordSize));
+
+  __ addptr(rsp, 16 * wordSize);
+#else
+
+  __ pop(rdi);
+  __ pop(rsi);
+  __ pop(rbp);
+  __ pop(rbx); // skip this value
+  __ pop(rbx);
+  __ pop(rdx);
+  __ pop(rcx);
+  __ addptr(rsp, BytesPerWord);
+#endif // _LP64
+}
+
+OopMapSet* GraalRuntime::generate_handle_exception(StubID id, GraalStubAssembler *sasm) {
+  __ block_comment("generate_handle_exception");
+
+  // incoming parameters
+  const Register exception_oop = rax;
+  const Register exception_pc  = rdx;
+  // other registers used in this stub
+  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+
+  // Save registers, if required.
+  OopMapSet* oop_maps = new OopMapSet();
+  OopMap* oop_map = NULL;
+  switch (id) {
+    case graal_handle_exception_nofpu_id:
+      // At this point all registers MAY be live.
+      oop_map = save_live_registers(sasm, 1 /*thread*/, id == graal_handle_exception_nofpu_id);
+      break;
+    default:  ShouldNotReachHere();
+  }
+
+#ifdef TIERED
+  // C2 can leave the fpu stack dirty
+  if (UseSSE < 2) {
+    __ empty_FPU_stack();
+  }
+#endif // TIERED
+
+  // verify that only rax, and rdx is valid at this time
+#ifdef ASSERT
+  __ movptr(rbx, 0xDEAD);
+  __ movptr(rcx, 0xDEAD);
+  __ movptr(rsi, 0xDEAD);
+  __ movptr(rdi, 0xDEAD);
+#endif
+
+  // verify that rax, contains a valid exception
+  __ verify_not_null_oop(exception_oop);
+
+  // load address of JavaThread object for thread-local data
+  NOT_LP64(__ get_thread(thread);)
+
+#ifdef ASSERT
+  // check that fields in JavaThread for exception oop and issuing pc are
+  // empty before writing to them
+  Label oop_empty;
+  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
+  __ jcc(Assembler::equal, oop_empty);
+  __ stop("exception oop already set");
+  __ bind(oop_empty);
+
+  Label pc_empty;
+  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
+  __ jcc(Assembler::equal, pc_empty);
+  __ stop("exception pc already set");
+  __ bind(pc_empty);
+#endif
+
+  // save exception oop and issuing pc into JavaThread
+  // (exception handler will load it from here)
+  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
+  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);
+
+  // patch throwing pc into return address (has bci & oop map)
+  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
+
+  // compute the exception handler.
+  // the exception oop and the throwing pc are read from the fields in JavaThread
+  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
+  oop_maps->add_gc_map(call_offset, oop_map);
+
+  // rax: handler address
+  //      will be the deopt blob if nmethod was deoptimized while we looked up
+  //      handler regardless of whether handler existed in the nmethod.
+
+  // only rax, is valid at this time, all other registers have been destroyed by the runtime call
+#ifdef ASSERT
+  __ movptr(rbx, 0xDEAD);
+  __ movptr(rcx, 0xDEAD);
+  __ movptr(rdx, 0xDEAD);
+  __ movptr(rsi, 0xDEAD);
+  __ movptr(rdi, 0xDEAD);
+#endif
+
+  // patch the return address, this stub will directly return to the exception handler
+  __ movptr(Address(rbp, 1*BytesPerWord), rax);
+
+  switch (id) {
+    case graal_handle_exception_nofpu_id:
+      // Restore the registers that were saved at the beginning.
+      restore_live_registers(sasm, id == graal_handle_exception_nofpu_id);
+      break;
+    default:  ShouldNotReachHere();
+  }
+
+  return oop_maps;
+}
+
+void GraalRuntime::generate_unwind_exception(GraalStubAssembler *sasm) {
+  // incoming parameters
+  const Register exception_oop = rax;
+  // callee-saved copy of exception_oop during runtime call
+  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
+  // other registers used in this stub
+  const Register exception_pc = rdx;
+  const Register handler_addr = rbx;
+  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+
+  // verify that only rax is valid at this time
+#ifdef ASSERT
+  __ movptr(rbx, 0xDEAD);
+  __ movptr(rcx, 0xDEAD);
+  __ movptr(rdx, 0xDEAD);
+  __ movptr(rsi, 0xDEAD);
+  __ movptr(rdi, 0xDEAD);
+#endif
+
+#ifdef ASSERT
+  // check that fields in JavaThread for exception oop and issuing pc are empty
+  NOT_LP64(__ get_thread(thread);)
+  Label oop_empty;
+  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
+  __ jcc(Assembler::equal, oop_empty);
+  __ stop("exception oop must be empty");
+  __ bind(oop_empty);
+
+  Label pc_empty;
+  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
+  __ jcc(Assembler::equal, pc_empty);
+  __ stop("exception pc must be empty");
+  __ bind(pc_empty);
+#endif
+
+  // clear the FPU stack in case any FPU results are left behind
+  __ empty_FPU_stack();
+
+  // save exception_oop in callee-saved register to preserve it during runtime calls
+  __ verify_not_null_oop(exception_oop);
+  __ movptr(exception_oop_callee_saved, exception_oop);
+
+  NOT_LP64(__ get_thread(thread);)
+  // Get return address (is on top of stack after leave).
+  __ movptr(exception_pc, Address(rsp, 0));
+
+  // search the exception handler address of the caller (using the return address)
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
+  // rax: exception handler address of the caller
+
+  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
+#ifdef ASSERT
+  __ movptr(rbx, 0xDEAD);
+  __ movptr(rcx, 0xDEAD);
+  __ movptr(rdx, 0xDEAD);
+  __ movptr(rdi, 0xDEAD);
+#endif
+
+  // move result of call into correct register
+  __ movptr(handler_addr, rax);
+
+  // Restore exception oop to RAX (required convention of exception handler).
+  __ movptr(exception_oop, exception_oop_callee_saved);
+
+  // verify that there is really a valid exception in rax
+  __ verify_not_null_oop(exception_oop);
+
+  // get throwing pc (= return address).
+  // rdx has been destroyed by the call, so it must be set again
+  // the pop is also necessary to simulate the effect of a ret(0)
+  __ pop(exception_pc);
+
+  // Restore SP from BP if the exception PC is a method handle call site.
+  NOT_LP64(__ get_thread(thread);)
+  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
+  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
+
+  // continue at exception handler (return address removed)
+  // note: do *not* remove arguments when unwinding the
+  //       activation since the caller assumes having
+  //       all arguments on the stack when entering the
+  //       runtime to determine the exception handler
+  //       (GC happens at call site with arguments!)
+  // rax: exception oop
+  // rdx: throwing pc
+  // rbx: exception handler
+  __ jmp(handler_addr);
+}
+
+OopMapSet* GraalRuntime::generate_code_for(StubID id, GraalStubAssembler* sasm) {
+
+  // for better readability
+  const bool must_gc_arguments = true;
+  const bool dont_gc_arguments = false;
+
+  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
+  bool save_fpu_registers = true;
+
+  // stub code & info for the different stubs
+  OopMapSet* oop_maps = NULL;
+  switch (id) {
+
+    case graal_new_instance_id:
+      {
+        Register klass = rdx; // Incoming
+        Register obj   = rax; // Result
+        __ set_info("new_instance", dont_gc_arguments);
+        __ enter();
+        OopMap* map = save_live_registers(sasm, 2);
+        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers_except_rax(sasm);
+        __ verify_oop(obj);
+        __ leave();
+        __ ret(0);
+
+        // rax,: new instance
+      }
+
+      break;
+
+    case graal_new_type_array_id:
+    case graal_new_object_array_id:
+      {
+        Register length   = rbx; // Incoming
+        Register klass    = rdx; // Incoming
+        Register obj      = rax; // Result
+
+        if (id == graal_new_type_array_id) {
+          __ set_info("new_type_array", dont_gc_arguments);
+        } else {
+          __ set_info("new_object_array", dont_gc_arguments);
+        }
+
+#ifdef ASSERT
+        // assert object type is really an array of the proper kind
+        {
+          Label ok;
+          Register t0 = obj;
+          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
+          __ sarl(t0, Klass::_lh_array_tag_shift);
+          int tag = ((id == graal_new_type_array_id)
+                     ? Klass::_lh_array_tag_type_value
+                     : Klass::_lh_array_tag_obj_value);
+          __ cmpl(t0, tag);
+          __ jcc(Assembler::equal, ok);
+          __ stop("assert(is an array klass)");
+          __ should_not_reach_here();
+          __ bind(ok);
+        }
+#endif // ASSERT
+        __ enter();
+        OopMap* map = save_live_registers(sasm, 3);
+        int call_offset;
+        if (id == graal_new_type_array_id) {
+          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
+        } else {
+          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
+        }
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers_except_rax(sasm);
+
+        __ verify_oop(obj);
+        __ leave();
+        __ ret(0);
+
+        // rax,: new array
+      }
+      break;
+
+    case graal_new_multi_array_id:
+      { GraalStubFrame f(sasm, "new_multi_array", dont_gc_arguments);
+        // rax,: klass
+        // rbx,: rank
+        // rcx: address of 1st dimension
+        OopMap* map = save_live_registers(sasm, 4);
+        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers_except_rax(sasm);
+
+        // rax,: new multi array
+        __ verify_oop(rax);
+      }
+      break;
+
+    case graal_register_finalizer_id:
+      {
+        __ set_info("register_finalizer", dont_gc_arguments);
+
+        // This is called via call_runtime so the arguments
+        // will be place in C abi locations
+
+#ifdef _LP64
+        __ verify_oop(j_rarg0);
+        __ mov(rax, j_rarg0);
+#else
+        // The object is passed on the stack and we haven't pushed a
+        // frame yet so it's one work away from top of stack.
+        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
+        __ verify_oop(rax);
+#endif // _LP64
+
+        // load the klass and check the has finalizer flag
+        Label register_finalizer;
+        Register t = rsi;
+        __ load_klass(t, rax);
+        __ movl(t, Address(t, Klass::access_flags_offset()));
+        __ testl(t, JVM_ACC_HAS_FINALIZER);
+        __ jcc(Assembler::notZero, register_finalizer);
+        __ ret(0);
+
+        __ bind(register_finalizer);
+        __ enter();
+        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, oop_map);
+
+        // Now restore all the live registers
+        restore_live_registers(sasm);
+
+        __ leave();
+        __ ret(0);
+      }
+      break;
+
+    case graal_handle_exception_nofpu_id:
+      { GraalStubFrame f(sasm, "handle_exception", dont_gc_arguments);
+        oop_maps = generate_handle_exception(id, sasm);
+      }
+      break;
+
+    case graal_slow_subtype_check_id:
+      {
+        // Typical calling sequence:
+        // __ push(klass_RInfo);  // object klass or other subclass
+        // __ push(sup_k_RInfo);  // array element klass or other superclass
+        // __ call(slow_subtype_check);
+        // Note that the subclass is pushed first, and is therefore deepest.
+        // Previous versions of this code reversed the names 'sub' and 'super'.
+        // This was operationally harmless but made the code unreadable.
+        enum layout {
+          rax_off, SLOT2(raxH_off)
+          rcx_off, SLOT2(rcxH_off)
+          rsi_off, SLOT2(rsiH_off)
+          rdi_off, SLOT2(rdiH_off)
+          // saved_rbp_off, SLOT2(saved_rbpH_off)
+          return_off, SLOT2(returnH_off)
+          sup_k_off, SLOT2(sup_kH_off)
+          klass_off, SLOT2(superH_off)
+          framesize,
+          result_off = klass_off  // deepest argument is also the return value
+        };
+
+        __ set_info("slow_subtype_check", dont_gc_arguments);
+        __ push(rdi);
+        __ push(rsi);
+        __ push(rcx);
+        __ push(rax);
+
+        // This is called by pushing args and not with the C ABI
+        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
+        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
+
+        Label miss;
+        Label success;
+        __ check_klass_subtype_fast_path(rsi, rax, rcx, &success, &miss, NULL);
+
+        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
+
+        // fallthrough on success:
+        __ bind(success);
+        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
+        __ pop(rax);
+        __ pop(rcx);
+        __ pop(rsi);
+        __ pop(rdi);
+        __ ret(0);
+
+        __ bind(miss);
+        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
+        __ pop(rax);
+        __ pop(rcx);
+        __ pop(rsi);
+        __ pop(rdi);
+        __ ret(0);
+      }
+      break;
+
+    case graal_unwind_exception_call_id: {
+      // remove the frame from the stack
+      __ movptr(rsp, rbp);
+      __ pop(rbp);
+      // exception_oop is passed using ordinary java calling conventions
+      __ movptr(rax, j_rarg0);
+
+      Label nonNullExceptionOop;
+      __ testptr(rax, rax);
+      __ jcc(Assembler::notZero, nonNullExceptionOop);
+      {
+        __ enter();
+        oop_maps = new OopMapSet();
+        OopMap* oop_map = save_live_registers(sasm, 0);
+        int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
+        oop_maps->add_gc_map(call_offset, oop_map);
+        __ leave();
+      }
+      __ bind(nonNullExceptionOop);
+
+      __ set_info("unwind_exception", dont_gc_arguments);
+      // note: no stubframe since we are about to leave the current
+      //       activation and we are calling a leaf VM function only.
+      generate_unwind_exception(sasm);
+      __ should_not_reach_here();
+      break;
+    }
+
+    case graal_OSR_migration_end_id: {
+      __ enter();
+      save_live_registers(sasm, 0);
+      __ movptr(c_rarg0, j_rarg0);
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end)));
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_set_deopt_info_id: {
+      __ movptr(Address(r15_thread, JavaThread::graal_deopt_info_offset()), rscratch1);
+      __ ret(0);
+      break;
+    }
+
+    case graal_create_null_pointer_exception_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_create_out_of_bounds_exception_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(rax, noreg, (address)graal_create_out_of_bounds_exception, j_rarg0);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_vm_error_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(noreg, noreg, (address)graal_vm_error, j_rarg0, j_rarg1, j_rarg2);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_log_printf_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_printf, j_rarg0, j_rarg1, j_rarg2);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_log_primitive_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_primitive, j_rarg0, j_rarg1, j_rarg2);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_log_object_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_object, j_rarg0, j_rarg1);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_verify_oop_id: {
+      // We use enter & leave so that a better stack trace is produced in the hs_err file
+      __ enter();
+      __ verify_oop(r13, "Graal verify oop");
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_arithmetic_frem_id: {
+      __ subptr(rsp, 8);
+      __ movflt(Address(rsp, 0), xmm1);
+      __ fld_s(Address(rsp, 0));
+      __ movflt(Address(rsp, 0), xmm0);
+      __ fld_s(Address(rsp, 0));
+      Label L;
+      __ bind(L);
+      __ fprem();
+      __ fwait();
+      __ fnstsw_ax();
+      __ testl(rax, 0x400);
+      __ jcc(Assembler::notZero, L);
+      __ fxch(1);
+      __ fpop();
+      __ fstp_s(Address(rsp, 0));
+      __ movflt(xmm0, Address(rsp, 0));
+      __ addptr(rsp, 8);
+      __ ret(0);
+      break;
+    }
+    case graal_arithmetic_drem_id: {
+      __ subptr(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm1);
+      __ fld_d(Address(rsp, 0));
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      Label L;
+      __ bind(L);
+      __ fprem();
+      __ fwait();
+      __ fnstsw_ax();
+      __ testl(rax, 0x400);
+      __ jcc(Assembler::notZero, L);
+      __ fxch(1);
+      __ fpop();
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addptr(rsp, 8);
+      __ ret(0);
+      break;
+    }
+    case graal_monitorenter_id: {
+      Register obj = j_rarg0;
+      Register lock = j_rarg1;
+      {
+        GraalStubFrame f(sasm, "graal_monitorenter", dont_gc_arguments);
+        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
+
+        // Called with store_parameter and not C abi
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorenter), obj, lock);
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers(sasm, save_fpu_registers);
+      }
+      __ ret(0);
+      break;
+    }
+    case graal_monitorexit_id: {
+      Register obj = j_rarg0;
+      Register lock = j_rarg1;
+      {
+        GraalStubFrame f(sasm, "graal_monitorexit", dont_gc_arguments);
+        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
+
+        // note: really a leaf routine but must set up last Java sp
+        //       => use call_RT for now (speed can be improved by
+        //       doing last Java sp setup manually)
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorexit), obj, lock);
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers(sasm, save_fpu_registers);
+      }
+      __ ret(0);
+      break;
+    }
+
+    default:
+      { GraalStubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
+        __ movptr(rax, (int)id);
+        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
+        __ should_not_reach_here();
+      }
+      break;
+  }
+  return oop_maps;
+}
+
+#undef __
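A note on the graal_arithmetic_frem_id and graal_arithmetic_drem_id stubs above: they spill the XMM arguments to the stack, load them onto the x87 stack, and loop on fprem until the C2 bit of the x87 status word (the 0x400 mask tested above) clears, which yields the truncated remainder required by the Java '%' operator for float and double. The standalone C++ sketch below is illustration only and not part of this changeset: std::fmod has the same truncated-remainder semantics as the stub, while the IEEE-754 std::remainder does not.

    // Illustration only: Java's float/double '%' matches C's fmod (truncated
    // remainder), which is what the fprem loop in the stub computes.
    #include <cmath>
    #include <cstdio>

    int main() {
      double dividend = 5.5, divisor = 2.0;
      double java_like = std::fmod(dividend, divisor);       // 1.5, sign follows the dividend
      double ieee      = std::remainder(dividend, divisor);  // -0.5, quotient rounded to nearest
      std::printf("fmod=%g remainder=%g\n", java_like, ieee);
      return 0;
    }
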
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/graalStubAssembler_x86.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "graal/graalRuntime.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/markOop.hpp"
+#include "runtime/basicLock.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/os.hpp"
+#include "runtime/stubRoutines.hpp"
+
+#ifndef PRODUCT
+
+void GraalStubAssembler::verify_stack_oop(int stack_offset) {
+  if (!VerifyOops) return;
+  verify_oop_addr(Address(rsp, stack_offset));
+}
+
+void GraalStubAssembler::verify_not_null_oop(Register r) {
+  if (!VerifyOops) return;
+  Label not_null;
+  testptr(r, r);
+  jcc(Assembler::notZero, not_null);
+  stop("non-null oop required");
+  bind(not_null);
+  verify_oop(r);
+}
+
+#endif // ifndef PRODUCT
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -1763,7 +1763,8 @@
 
   int vep_offset = ((intptr_t)__ pc()) - start;
 
-#ifdef COMPILER1 || GRAAL
+#if defined(COMPILER1) || defined(GRAAL)
+
   if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
     // Object.hashCode can pull the hashCode from the header word
     // instead of doing a full VM transition once it's been computed.
--- a/src/share/vm/c1/c1_globals.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/c1/c1_globals.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -56,14 +56,6 @@
 //
 #define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
                                                                             \
-  product(bool, DebugGraal, true,                                           \
-          "Enable JVMTI for the compiler thread")                           \
-  product(bool, BootstrapGraal, true,                                       \
-          "Bootstrap graal before running Java main method")                \
-  product(ccstr, GraalClassPath, NULL,                                      \
-          "Use the defined graal class path instead of searching for the classes") \
-  product(intx, TraceGraal, 0,                                              \
-          "Trace level for graal")                                          \
   product(bool, TraceSignals, false,                                        \
           "Trace signals and implicit exception handling")                  \
   /* Printing */                                                            \
--- a/src/share/vm/code/codeBlob.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/code/codeBlob.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -134,10 +134,11 @@
   cb->copy_code_and_locs_to(this);
   set_oop_maps(oop_maps);
   _frame_size = frame_size;
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
+
   // probably wrong for tiered
   assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
-#endif // COMPILER1
+#endif // COMPILER1 || GRAAL
 }
 
 
@@ -383,7 +384,7 @@
   _unpack_offset           = unpack_offset;
   _unpack_with_exception   = unpack_with_exception_offset;
   _unpack_with_reexecution = unpack_with_reexecution_offset;
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
   _unpack_with_exception_in_tls   = -1;
 #endif
 }
--- a/src/share/vm/code/codeBlob.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/code/codeBlob.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -105,6 +105,7 @@
 
   virtual bool is_compiled_by_c2() const         { return false; }
   virtual bool is_compiled_by_c1() const         { return false; }
+  virtual bool is_compiled_by_graal() const      { return false; }
 
   // Casting
   nmethod* as_nmethod_or_null()                  { return is_nmethod() ? (nmethod*) this : NULL; }
@@ -357,7 +358,7 @@
 
   int _unpack_with_exception_in_tls;
 
-  // (thomaswue) Offset when graal calls uncommon_trap.
+  // (thomaswue) Offset when Graal calls uncommon_trap.
   int _uncommon_trap_offset;
   int _implicit_exception_uncommon_trap_offset;
 
@@ -415,7 +416,7 @@
   }
   address unpack_with_exception_in_tls() const   { return code_begin() + _unpack_with_exception_in_tls; }
 
-  // (thomaswue) Offset when graal calls uncommon_trap.
+  // (thomaswue) Offset when Graal calls uncommon_trap.
   void set_uncommon_trap_offset(int offset) {
     _uncommon_trap_offset = offset;
     assert(contains(code_begin() + _uncommon_trap_offset), "must be PC inside codeblob");
--- a/src/share/vm/code/compiledIC.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/code/compiledIC.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -239,8 +239,8 @@
   // for calling directly to vep without using the inline cache (i.e., cached_value == NULL)
 #ifdef ASSERT
   CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
-  bool is_c1_method = caller->is_compiled_by_c1();
-  assert( is_c1_method ||
+  bool is_c1_or_graal_method = caller->is_compiled_by_c1() || caller->is_compiled_by_graal();
+  assert( is_c1_or_graal_method ||
          !is_monomorphic ||
          is_optimized() ||
          (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
--- a/src/share/vm/code/nmethod.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/code/nmethod.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -102,6 +102,11 @@
   if (is_native_method()) return false;
   return compiler()->is_c1();
 }
+bool nmethod::is_compiled_by_graal() const {
+  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
+  if (is_native_method()) return false;
+  return compiler()->is_graal();
+}
 bool nmethod::is_compiled_by_c2() const {
   if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
   if (is_native_method()) return false;
@@ -864,7 +869,7 @@
 #ifdef GRAAL
     _graal_installed_code = installed_code();
 
-    // graal produces no (!) stub section
+    // Graal might not produce any stub sections
     if (offsets->value(CodeOffsets::Exceptions) != -1) {
       _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
     } else {
@@ -2552,6 +2557,8 @@
     tty->print("(c2) ");
   } else if (is_compiled_by_shark()) {
     tty->print("(shark) ");
+  } else if (is_compiled_by_graal()) {
+    tty->print("(Graal) ");
   } else {
     tty->print("(nm) ");
   }
--- a/src/share/vm/code/nmethod.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/code/nmethod.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -352,6 +352,7 @@
   bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }
 
   bool is_compiled_by_c1() const;
+  bool is_compiled_by_graal() const;
   bool is_compiled_by_c2() const;
   bool is_compiled_by_shark() const;
 
--- a/src/share/vm/compiler/abstractCompiler.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/compiler/abstractCompiler.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -50,26 +50,36 @@
   // Missing feature tests
   virtual bool supports_native()                 { return true; }
   virtual bool supports_osr   ()                 { return true; }
-#if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK))
+#if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !defined(GRAAL))
   virtual bool is_c1   ()                        { return false; }
   virtual bool is_c2   ()                        { return false; }
   virtual bool is_shark()                        { return false; }
+  virtual bool is_graal()                        { return false; }
 #else
 #ifdef COMPILER1
   bool is_c1   ()                                { return true; }
   bool is_c2   ()                                { return false; }
   bool is_shark()                                { return false; }
+  bool is_graal()                                { return false; }
 #endif // COMPILER1
 #ifdef COMPILER2
   bool is_c1   ()                                { return false; }
   bool is_c2   ()                                { return true; }
   bool is_shark()                                { return false; }
+  bool is_graal()                                { return false; }
 #endif // COMPILER2
 #ifdef SHARK
   bool is_c1   ()                                { return false; }
   bool is_c2   ()                                { return false; }
   bool is_shark()                                { return true; }
+  bool is_graal()                                { return false; }
 #endif // SHARK
+#ifdef GRAAL
+  bool is_c1   ()                                { return false; }
+  bool is_c2   ()                                { return false; }
+  bool is_shark()                                { return false; }
+  bool is_graal()                                { return true; }
+#endif // GRAAL
 #endif // TIERED
 
   void mark_initialized()                        { _is_initialized = true; }
--- a/src/share/vm/compiler/compileBroker.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/compiler/compileBroker.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -51,6 +51,9 @@
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalCompiler.hpp"
+#endif
 #ifdef COMPILER2
 #include "opto/c2compiler.hpp"
 #endif
--- a/src/share/vm/compiler/oopMap.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/compiler/oopMap.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -319,6 +319,7 @@
 static void add_derived_oop(oop* base, oop* derived) {
 #ifndef TIERED
   COMPILER1_PRESENT(ShouldNotReachHere();)
+  GRAAL_ONLY(ShouldNotReachHere();)
 #endif // TIERED
 #ifdef COMPILER2
   DerivedPointerTable::add(derived, base);
@@ -380,6 +381,7 @@
     if (!oms.is_done()) {
 #ifndef TIERED
       COMPILER1_PRESENT(ShouldNotReachHere();)
+      GRAAL_ONLY(ShouldNotReachHere();)
 #endif // !TIERED
       // Protect the operation on the derived pointers.  This
       // protects the addition of derived pointers to the shared
@@ -502,7 +504,7 @@
 
   // Check that runtime stubs save all callee-saved registers
 #ifdef COMPILER2
-  assert(cb->is_compiled_by_c1() || !cb->is_runtime_stub() ||
+  assert(cb->is_compiled_by_c1() || cb->is_compiled_by_graal() || !cb->is_runtime_stub() ||
          (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
          "must save all");
 #endif // COMPILER2
@@ -521,6 +523,7 @@
 bool OopMap::has_derived_pointer() const {
 #ifndef TIERED
   COMPILER1_PRESENT(return false);
+  GRAAL_ONLY(return false);
 #endif // !TIERED
 #ifdef COMPILER2
   OopMapStream oms((OopMap*)this,OopMapValue::derived_oop_value);
--- a/src/share/vm/graal/graalCodeInstaller.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/graal/graalCodeInstaller.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -29,7 +29,7 @@
 #include "graal/graalJavaAccess.hpp"
 #include "graal/graalCompilerToVM.hpp"
 #include "graal/graalVmIds.hpp"
-#include "c1/c1_Runtime1.hpp"
+#include "graal/graalRuntime.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "vmreg_x86.inline.hpp"
 
--- a/src/share/vm/graal/graalCompiler.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/graal/graalCompiler.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -30,7 +30,7 @@
 #include "graal/graalCompilerToVM.hpp"
 #include "graal/graalVmIds.hpp"
 #include "graal/graalEnv.hpp"
-#include "c1/c1_Runtime1.hpp"
+#include "graal/graalRuntime.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/compilationPolicy.hpp"
 
@@ -57,7 +57,7 @@
   _deopted_leaf_graph_count = 0;
 
   initialize_buffer_blob();
-  Runtime1::initialize(THREAD->get_buffer_blob());
+  GraalRuntime::initialize(THREAD->get_buffer_blob());
 
   JNIEnv *env = ((JavaThread *) Thread::current())->jni_environment();
   jclass klass = env->FindClass("com/oracle/graal/hotspot/bridge/CompilerToVMImpl");
@@ -144,12 +144,7 @@
 
   JavaThread* THREAD = JavaThread::current();
   if (THREAD->get_buffer_blob() == NULL) {
-    // setup CodeBuffer.  Preallocate a BufferBlob of size
-    // NMethodSizeLimit plus some extra space for constants.
-    int code_buffer_size = Compilation::desired_max_code_buffer_size() +
-      Compilation::desired_max_constant_size();
-    BufferBlob* blob = BufferBlob::create("graal temporary CodeBuffer",
-                                          code_buffer_size);
+    BufferBlob* blob = BufferBlob::create("Graal thread-local CodeBuffer", GraalNMethodSizeLimit);
     guarantee(blob != NULL, "must create code buffer");
     THREAD->set_buffer_blob(blob);
   }
--- a/src/share/vm/graal/graalCompiler.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/graal/graalCompiler.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -48,12 +48,11 @@
 
   virtual const char* name() { return "G"; }
 
-  // Native / OSR not supported
-  virtual bool supports_native()                 { return true; }
+  virtual bool supports_native()                 { return false; }
   virtual bool supports_osr   ()                 { return true; }
 
-  // Pretend to be C1
-  bool is_c1   ()                                { return true; }
+  bool is_graal()                                { return true; }
+  bool is_c1   ()                                { return false; }
   bool is_c2   ()                                { return false; }
 
   // Initialization
--- a/src/share/vm/graal/graalCompilerToVM.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/graal/graalCompilerToVM.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -27,7 +27,7 @@
 #include "oops/generateOopMap.hpp"
 #include "oops/fieldStreams.hpp"
 #include "runtime/javaCalls.hpp"
-#include "c1/c1_Runtime1.hpp"
+#include "graal/graalRuntime.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "graal/graalCompilerToVM.hpp"
@@ -669,41 +669,36 @@
   set_int(env, config, "bciProfileWidth", BciProfileWidth);
   set_int(env, config, "typeProfileWidth", TypeProfileWidth);
 
-  // We use the fast path stub so that we get TLAB refills whenever possible instead of
-  // unconditionally allocating directly from the heap (which the slow path does).
-  // The stub must also do initialization when the compiled check fails.
-  Runtime1::StubID newInstanceStub = Runtime1::fast_new_instance_init_check_id;
-
   set_long(env, config, "debugStub", VmIds::addStub((address)warning));
-  set_long(env, config, "instanceofStub", VmIds::addStub(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
-  set_long(env, config, "newInstanceStub", VmIds::addStub(Runtime1::entry_for(newInstanceStub)));
-  set_long(env, config, "newTypeArrayStub", VmIds::addStub(Runtime1::entry_for(Runtime1::new_type_array_id)));
-  set_long(env, config, "newObjectArrayStub", VmIds::addStub(Runtime1::entry_for(Runtime1::new_object_array_id)));
-  set_long(env, config, "newMultiArrayStub", VmIds::addStub(Runtime1::entry_for(Runtime1::new_multi_array_id)));
+  set_long(env, config, "instanceofStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_slow_subtype_check_id)));
+  set_long(env, config, "newInstanceStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_new_instance_id)));
+  set_long(env, config, "newTypeArrayStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_new_type_array_id)));
+  set_long(env, config, "newObjectArrayStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_new_object_array_id)));
+  set_long(env, config, "newMultiArrayStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_new_multi_array_id)));
   set_long(env, config, "inlineCacheMissStub", VmIds::addStub(SharedRuntime::get_ic_miss_stub()));
-  set_long(env, config, "handleExceptionStub", VmIds::addStub(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
+  set_long(env, config, "handleExceptionStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_handle_exception_nofpu_id)));
   set_long(env, config, "handleDeoptStub", VmIds::addStub(SharedRuntime::deopt_blob()->unpack()));
-  set_long(env, config, "fastMonitorEnterStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_monitorenter_id)));
-  set_long(env, config, "fastMonitorExitStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_monitorexit_id)));
-  set_long(env, config, "verifyOopStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_verify_oop_id)));
-  set_long(env, config, "vmErrorStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_vm_error_id)));
+  set_long(env, config, "monitorEnterStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_monitorenter_id)));
+  set_long(env, config, "monitorExitStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_monitorexit_id)));
+  set_long(env, config, "verifyOopStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_verify_oop_id)));
+  set_long(env, config, "vmErrorStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_vm_error_id)));
   set_long(env, config, "deoptimizeStub", VmIds::addStub(SharedRuntime::deopt_blob()->uncommon_trap()));
-  set_long(env, config, "unwindExceptionStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_unwind_exception_call_id)));
-  set_long(env, config, "osrMigrationEndStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_OSR_migration_end_id)));
-  set_long(env, config, "registerFinalizerStub", VmIds::addStub(Runtime1::entry_for(Runtime1::register_finalizer_id)));
-  set_long(env, config, "setDeoptInfoStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_set_deopt_info_id)));
-  set_long(env, config, "createNullPointerExceptionStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_create_null_pointer_exception_id)));
-  set_long(env, config, "createOutOfBoundsExceptionStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_create_out_of_bounds_exception_id)));
+  set_long(env, config, "unwindExceptionStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_unwind_exception_call_id)));
+  set_long(env, config, "osrMigrationEndStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_OSR_migration_end_id)));
+  set_long(env, config, "registerFinalizerStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_register_finalizer_id)));
+  set_long(env, config, "setDeoptInfoStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_set_deopt_info_id)));
+  set_long(env, config, "createNullPointerExceptionStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_create_null_pointer_exception_id)));
+  set_long(env, config, "createOutOfBoundsExceptionStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_create_out_of_bounds_exception_id)));
   set_long(env, config, "javaTimeMillisStub", VmIds::addStub(CAST_FROM_FN_PTR(address, os::javaTimeMillis)));
   set_long(env, config, "javaTimeNanosStub", VmIds::addStub(CAST_FROM_FN_PTR(address, os::javaTimeNanos)));
-  set_long(env, config, "arithmeticFremStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_arithmetic_frem_id)));
-  set_long(env, config, "arithmeticDremStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_arithmetic_drem_id)));
+  set_long(env, config, "arithmeticFremStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_arithmetic_frem_id)));
+  set_long(env, config, "arithmeticDremStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_arithmetic_drem_id)));
   set_long(env, config, "arithmeticSinStub", VmIds::addStub(CAST_FROM_FN_PTR(address, SharedRuntime::dsin)));
   set_long(env, config, "arithmeticCosStub", VmIds::addStub(CAST_FROM_FN_PTR(address, SharedRuntime::dcos)));
   set_long(env, config, "arithmeticTanStub", VmIds::addStub(CAST_FROM_FN_PTR(address, SharedRuntime::dtan)));
-  set_long(env, config, "logPrimitiveStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_log_primitive_id)));
-  set_long(env, config, "logObjectStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_log_object_id)));
-  set_long(env, config, "logPrintfStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_log_printf_id)));
+  set_long(env, config, "logPrimitiveStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_log_primitive_id)));
+  set_long(env, config, "logObjectStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_log_object_id)));
+  set_long(env, config, "logPrintfStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_log_printf_id)));
 
 
   BarrierSet* bs = Universe::heap()->barrier_set();
--- a/src/share/vm/graal/graalEnv.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/graal/graalEnv.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -42,7 +42,7 @@
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/dtrace.hpp"
-#include "c1/c1_Runtime1.hpp"
+#include "graal/graalRuntime.hpp"
 
 // ------------------------------------------------------------------
 // Note: the logic of this method should mirror the logic of
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/graal/graalGlobals.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "graal/graalGlobals.hpp"
+
+GRAAL_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/graal/graalGlobals.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GRAAL_GRAALGLOBALS_HPP
+#define SHARE_VM_GRAAL_GRAALGLOBALS_HPP
+
+#include "runtime/globals.hpp"
+#ifdef TARGET_ARCH_x86
+# include "graalGlobals_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "graalGlobals_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "graalGlobals_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "graalGlobals_ppc.hpp"
+#endif
+
+//
+// Defines all global flags used by the Graal compiler.
+//
+#define GRAAL_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
+                                                                            \
+  product(bool, DebugGraal, true,                                           \
+          "Enable JVMTI for the compiler thread")                           \
+                                                                            \
+  product(bool, BootstrapGraal, true,                                       \
+          "Bootstrap Graal before running Java main method")                \
+                                                                            \
+  product(ccstr, GraalClassPath, NULL,                                      \
+          "Use the class path for Graal classes")                           \
+                                                                            \
+  product(intx, TraceGraal, 0,                                              \
+          "Trace level for Graal")                                          \
+                                                                            \
+  product(bool, TraceSignals, false,                                        \
+          "Trace signals and implicit exception handling")                  \
+                                                                            \
+  product_pd(intx, SafepointPollOffset,                                     \
+          "Offset added to polling address (Intel only)")                   \
+                                                                            \
+  develop(bool, UseFastNewInstance, true,                                   \
+          "Use fast inlined instance allocation")                           \
+                                                                            \
+  develop(bool, UseFastNewTypeArray, true,                                  \
+          "Use fast inlined type array allocation")                         \
+                                                                            \
+  develop(bool, UseFastNewObjectArray, true,                                \
+          "Use fast inlined object array allocation")                       \
+                                                                            \
+  develop(bool, UseFastLocking, true,                                       \
+          "Use fast inlined locking code")                                  \
+                                                                            \
+  develop(intx, GraalNMethodSizeLimit, (64*K)*wordSize,                     \
+          "Maximum size of a compiled method.")                             \
+                                                                            \
+  notproduct(bool, PrintSimpleStubs, false,                                 \
+          "Print SimpleStubs")                                              \
+                                                                            \
+
+
+// Read default values for Graal globals
+
+GRAAL_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+
+#endif // SHARE_VM_GRAAL_GRAALGLOBALS_HPP
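The GRAAL_FLAGS list above is an X-macro: graalGlobals.hpp expands it with the DECLARE_* expanders to declare the flag variables, and graalGlobals.cpp expands the same list with the MATERIALIZE_* expanders to define them. The toy program below sketches that pattern with made-up flag and expander names (hypothetical, for illustration only; it is not HotSpot code and deliberately ignores the develop/product/notproduct distinctions).

    // Toy X-macro sketch with hypothetical names.
    #include <cstdio>

    #define MY_FLAGS(product)                         \
      product(bool, EnableFoo, true, "Enable foo")    \
      product(int,  FooLevel,  2,    "Foo verbosity")

    // Header role: declare the flag variables.
    #define DECLARE_FLAG(type, name, value, doc) extern type name;
    // .cpp role: define and initialize the same variables.
    #define MATERIALIZE_FLAG(type, name, value, doc) type name = value;

    MY_FLAGS(DECLARE_FLAG)      // extern bool EnableFoo; extern int FooLevel;
    MY_FLAGS(MATERIALIZE_FLAG)  // bool EnableFoo = true;  int FooLevel = 2;

    int main() {
      std::printf("EnableFoo=%d FooLevel=%d\n", EnableFoo ? 1 : 0, FooLevel);
      return 0;
    }
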
--- a/src/share/vm/graal/graalRuntime.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/graal/graalRuntime.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -24,7 +24,573 @@
 #include "precompiled.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "prims/jvm.h"
+#include "graal/graalRuntime.hpp"
 #include "graal/graalVMToCompiler.hpp"
+#include "asm/codeBuffer.hpp"
+#include "runtime/biasedLocking.hpp"
+
+// Implementation of GraalStubAssembler
+
+GraalStubAssembler::GraalStubAssembler(CodeBuffer* code, const char * name, int stub_id) : MacroAssembler(code) {
+  _name = name;
+  _must_gc_arguments = false;
+  _frame_size = no_frame_size;
+  _num_rt_args = 0;
+  _stub_id = stub_id;
+}
+
+
+void GraalStubAssembler::set_info(const char* name, bool must_gc_arguments) {
+  _name = name;
+  _must_gc_arguments = must_gc_arguments;
+}
+
+
+void GraalStubAssembler::set_frame_size(int size) {
+  if (_frame_size == no_frame_size) {
+    _frame_size = size;
+  }
+  assert(_frame_size == size, "can't change the frame size");
+}
+
+
+void GraalStubAssembler::set_num_rt_args(int args) {
+  if (_num_rt_args == 0) {
+    _num_rt_args = args;
+  }
+  assert(_num_rt_args == args, "can't change the number of args");
+}
+
+// Implementation of GraalRuntime
+
+CodeBlob* GraalRuntime::_blobs[GraalRuntime::number_of_ids];
+const char *GraalRuntime::_blob_names[] = {
+  GRAAL_STUBS(STUB_NAME, LAST_STUB_NAME)
+};
+
+// Simple helper to see if the caller of a runtime stub which
+// entered the VM has been deoptimized
+
+static bool caller_is_deopted() {
+  JavaThread* thread = JavaThread::current();
+  RegisterMap reg_map(thread, false);
+  frame runtime_frame = thread->last_frame();
+  frame caller_frame = runtime_frame.sender(&reg_map);
+  assert(caller_frame.is_compiled_frame(), "must be compiled");
+  return caller_frame.is_deoptimized_frame();
+}
+
+// Stress deoptimization
+static void deopt_caller() {
+  if ( !caller_is_deopted()) {
+    JavaThread* thread = JavaThread::current();
+    RegisterMap reg_map(thread, false);
+    frame runtime_frame = thread->last_frame();
+    frame caller_frame = runtime_frame.sender(&reg_map);
+    Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
+    assert(caller_is_deopted(), "Must be deoptimized");
+  }
+}
+
+static bool setup_code_buffer(CodeBuffer* code) {
+  // Preallocate a buffer for shared relocation records:
+  int locs_buffer_size = 1 * (relocInfo::length_limit + sizeof(relocInfo));
+  char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
+  code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
+                                        locs_buffer_size / sizeof(relocInfo));
+
+  // Global stubs have neither constants nor local stubs
+  code->initialize_consts_size(0);
+  code->initialize_stubs_size(0);
+
+  return true;
+}
+
+void GraalRuntime::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
+  assert(0 <= id && id < number_of_ids, "illegal stub id");
+  ResourceMark rm;
+  // create code buffer for code storage
+  CodeBuffer code(buffer_blob);
+
+  setup_code_buffer(&code);
+
+  // create assembler for code generation
+  GraalStubAssembler* sasm = new GraalStubAssembler(&code, name_for(id), id);
+  // generate code for runtime stub
+  OopMapSet* oop_maps;
+  oop_maps = generate_code_for(id, sasm);
+  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
+         "if stub has an oop map it must have a valid frame size");
+
+#ifdef ASSERT
+  // Make sure that stubs that need oopmaps have them
+  switch (id) {
+    // These stubs don't need to have an oopmap
+    case graal_slow_subtype_check_id:
+#if defined(SPARC) || defined(PPC)
+    case handle_exception_nofpu_id:  // Unused on sparc
+#endif
+#ifdef GRAAL
+    case graal_verify_oop_id:
+    case graal_unwind_exception_call_id:
+    case graal_OSR_migration_end_id:
+    case graal_arithmetic_frem_id:
+    case graal_arithmetic_drem_id:
+    case graal_set_deopt_info_id:
+#endif
+      break;
+
+    // All other stubs should have oopmaps
+    default:
+      assert(oop_maps != NULL, "must have an oopmap");
+  }
+#endif
+
+  // align so printing shows nops instead of random code at the end (SimpleStubs are aligned)
+  sasm->align(BytesPerWord);
+  // make sure all code is in code buffer
+  sasm->flush();
+  // create the runtime stub blob
+  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
+                                                 &code,
+                                                 CodeOffsets::frame_never_safe,
+                                                 sasm->frame_size(),
+                                                 oop_maps,
+                                                 sasm->must_gc_arguments());
+  // install blob
+  assert(blob != NULL, "blob must exist");
+  _blobs[id] = blob;
+}
+
+
+void GraalRuntime::initialize(BufferBlob* blob) {
+  // generate stubs
+  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
+  // printing
+#ifndef PRODUCT
+  if (PrintSimpleStubs) {
+    ResourceMark rm;
+    for (int id = 0; id < number_of_ids; id++) {
+      _blobs[id]->print();
+      if (_blobs[id]->oop_maps() != NULL) {
+        _blobs[id]->oop_maps()->print();
+      }
+    }
+  }
+#endif
+}
+
+
+CodeBlob* GraalRuntime::blob_for(StubID id) {
+  assert(0 <= id && id < number_of_ids, "illegal stub id");
+  return _blobs[id];
+}
+
+
+const char* GraalRuntime::name_for(StubID id) {
+  assert(0 <= id && id < number_of_ids, "illegal stub id");
+  return _blob_names[id];
+}
+
+const char* GraalRuntime::name_for_address(address entry) {
+  for (int id = 0; id < number_of_ids; id++) {
+    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
+  }
+
+#define FUNCTION_CASE(a, f) \
+  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
+
+  FUNCTION_CASE(entry, os::javaTimeMillis);
+  FUNCTION_CASE(entry, os::javaTimeNanos);
+  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
+  FUNCTION_CASE(entry, SharedRuntime::d2f);
+  FUNCTION_CASE(entry, SharedRuntime::d2i);
+  FUNCTION_CASE(entry, SharedRuntime::d2l);
+  FUNCTION_CASE(entry, SharedRuntime::dcos);
+  FUNCTION_CASE(entry, SharedRuntime::dexp);
+  FUNCTION_CASE(entry, SharedRuntime::dlog);
+  FUNCTION_CASE(entry, SharedRuntime::dlog10);
+  FUNCTION_CASE(entry, SharedRuntime::dpow);
+  FUNCTION_CASE(entry, SharedRuntime::drem);
+  FUNCTION_CASE(entry, SharedRuntime::dsin);
+  FUNCTION_CASE(entry, SharedRuntime::dtan);
+  FUNCTION_CASE(entry, SharedRuntime::f2i);
+  FUNCTION_CASE(entry, SharedRuntime::f2l);
+  FUNCTION_CASE(entry, SharedRuntime::frem);
+  FUNCTION_CASE(entry, SharedRuntime::l2d);
+  FUNCTION_CASE(entry, SharedRuntime::l2f);
+  FUNCTION_CASE(entry, SharedRuntime::ldiv);
+  FUNCTION_CASE(entry, SharedRuntime::lmul);
+  FUNCTION_CASE(entry, SharedRuntime::lrem);
+  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
+  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
+#ifdef TRACE_HAVE_INTRINSICS
+  FUNCTION_CASE(entry, TRACE_TIME_METHOD);
+#endif
+
+#undef FUNCTION_CASE
+
+  return "<unknown function>"; // fall back for unrecognized entry points
+}
+
+
+JRT_ENTRY(void, GraalRuntime::new_instance(JavaThread* thread, Klass* klass))
+  assert(klass->is_klass(), "not a class");
+  instanceKlassHandle h(thread, klass);
+  h->check_valid_for_instantiation(true, CHECK);
+  // make sure klass is initialized
+  h->initialize(CHECK);
+  // allocate instance and return via TLS
+  oop obj = h->allocate_instance(CHECK);
+  thread->set_vm_result(obj);
+JRT_END
+
+
+JRT_ENTRY(void, GraalRuntime::new_type_array(JavaThread* thread, Klass* klass, jint length))
+  // Note: no handle for klass needed since it is not used
+  //       after new_typeArray() and no GC can happen before.
+  //       (This may have to change if this code changes!)
+  assert(klass->is_klass(), "not a class");
+  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
+  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
+  thread->set_vm_result(obj);
+  // Reaching this runtime call is pretty rare, but deoptimizing here stresses
+  // the deoptimization machinery, so force a deopt to exercise that path.
+  if (DeoptimizeALot) {
+    deopt_caller();
+  }
+
+JRT_END
+
+
+JRT_ENTRY(void, GraalRuntime::new_object_array(JavaThread* thread, Klass* array_klass, jint length))
+  // Note: no handle for klass needed since it is not used
+  //       after new_objArray() and no GC can happen before.
+  //       (This may have to change if this code changes!)
+  assert(array_klass->is_klass(), "not a class");
+  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
+  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
+  thread->set_vm_result(obj);
+  // Reaching this runtime call is pretty rare, but deoptimizing here stresses
+  // the deoptimization machinery, so force a deopt to exercise that path.
+  if (DeoptimizeALot) {
+    deopt_caller();
+  }
+JRT_END
+
+
+JRT_ENTRY(void, GraalRuntime::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
+  assert(klass->is_klass(), "not a class");
+  assert(rank >= 1, "rank must be nonzero");
+  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
+  thread->set_vm_result(obj);
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::unimplemented_entry(JavaThread* thread, StubID id))
+  tty->print_cr("GraalRuntime::entry_for(%d) returned unimplemented entry point", id);
+JRT_END
+
+extern void vm_exit(int code);
+
+// Enter this method from compiled code handler below. This is where we transition
+// to VM mode. This is done as a helper routine so that the method called directly
+// from compiled code does not have to transition to VM. This allows the entry
+// method to see if the nmethod that we have just looked up a handler for has
+// been deoptimized while we were in the VM. This simplifies the assembly code
+// in the cpu directories.
+//
+// We are entering here from exception stub (via the entry method below)
+// If there is a compiled exception handler in this method, we will continue there;
+// otherwise we will unwind the stack and continue at the caller of top frame method
+// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
+// control the area where we can allow a safepoint. After we exit the safepoint area we can
+// check to see if the handler we are going to return is now in a nmethod that has
+// been deoptimized. If that is the case we return the deopt blob
+// unpack_with_exception entry instead. This makes life for the exception blob easier
+// because making that same check and diverting is painful from assembly language.
+JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
+  // Reset method handle flag.
+  thread->set_is_method_handle_return(false);
+
+  Handle exception(thread, ex);
+  nm = CodeCache::find_nmethod(pc);
+  assert(nm != NULL, "this is not an nmethod");
+  // Adjust the pc as needed.
+  if (nm->is_deopt_pc(pc)) {
+    RegisterMap map(thread, false);
+    frame exception_frame = thread->last_frame().sender(&map);
+    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
+    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
+    pc = exception_frame.pc();
+  }
+#ifdef ASSERT
+  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
+  assert(exception->is_oop(), "just checking");
+  // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
+  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
+    if (ExitVMOnVerifyError) vm_exit(-1);
+    ShouldNotReachHere();
+  }
+#endif
+
+  // Check the stack guard pages and reenable them if necessary and there is
+  // enough space on the stack to do so.  Use fast exceptions only if the guard
+  // pages are enabled.
+  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
+  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
+
+  if (JvmtiExport::can_post_on_exceptions()) {
+    // To ensure correct notification of exception catches and throws
+    // we have to deoptimize here.  If we attempted to notify the
+    // catches and throws during this exception lookup it's possible
+    // we could deoptimize on the way out of the VM and end back in
+    // the interpreter at the throw site.  This would result in double
+    // notifications since the interpreter would also notify about
+    // these same catches and throws as it unwound the frame.
+
+    RegisterMap reg_map(thread);
+    frame stub_frame = thread->last_frame();
+    frame caller_frame = stub_frame.sender(&reg_map);
+
+    // We don't really want to deoptimize the nmethod itself since we
+    // can actually continue in the exception handler ourselves but I
+    // don't see an easy way to have the desired effect.
+    Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
+    assert(caller_is_deopted(), "Must be deoptimized");
+
+    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
+  }
+
+  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
+  if (guard_pages_enabled) {
+    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
+    if (fast_continuation != NULL) {
+      // Set flag if return address is a method handle call site.
+      thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
+      return fast_continuation;
+    }
+  }
+
+  // If the stack guard pages are enabled, check whether there is a handler in
+  // the current method.  Otherwise (guard pages disabled), force an unwind and
+  // skip the exception cache update (i.e., just leave continuation==NULL).
+  address continuation = NULL;
+  if (guard_pages_enabled) {
+
+    // New exception handling mechanism can support inlined methods
+    // with exception handlers since the mappings are from PC to PC
+
+    // debugging support
+    // tracing
+    if (TraceExceptions) {
+      ttyLocker ttyl;
+      ResourceMark rm;
+      int offset = pc - nm->code_begin();
+      tty->print_cr("Exception <%s> (0x%x) thrown in compiled method <%s> at PC " PTR_FORMAT " [" PTR_FORMAT "+%d] for thread 0x%x",
+                    exception->print_value_string(), (address)exception(), nm->method()->print_value_string(), pc, nm->code_begin(), offset, thread);
+    }
+    // for AbortVMOnException flag
+    NOT_PRODUCT(Exceptions::debug_check_abort(exception));
+
+    // Clear out the exception oop and pc since looking up an
+    // exception handler can cause class loading, which might throw an
+    // exception and those fields are expected to be clear during
+    // normal bytecode execution.
+    thread->set_exception_oop(NULL);
+    thread->set_exception_pc(NULL);
+
+    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
+    // If an exception was thrown during exception dispatch, the exception oop may have changed
+    thread->set_exception_oop(exception());
+    thread->set_exception_pc(pc);
+
+    // the exception cache is used only by non-implicit exceptions
+    if (continuation != NULL && !SharedRuntime::deopt_blob()->contains(continuation)) {
+      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
+    }
+  }
+
+  thread->set_vm_result(exception());
+  // Set flag if return address is a method handle call site.
+  thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
+
+  if (TraceExceptions) {
+    ttyLocker ttyl;
+    ResourceMark rm;
+    tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,
+                  thread, continuation, pc);
+  }
+
+  return continuation;
+JRT_END
+
+// Enter this method from compiled code only if there is a Java exception handler
+// in the method handling the exception.
+// We are entering here from exception stub. We don't do a normal VM transition here.
+// We do it in a helper. This is so we can check to see if the nmethod we have just
+// searched for an exception handler has been deoptimized in the meantime.
+address GraalRuntime::exception_handler_for_pc(JavaThread* thread) {
+  oop exception = thread->exception_oop();
+  address pc = thread->exception_pc();
+  // Still in Java mode
+  DEBUG_ONLY(ResetNoHandleMark rnhm);
+  nmethod* nm = NULL;
+  address continuation = NULL;
+  {
+    // Enter VM mode by calling the helper
+    ResetNoHandleMark rnhm;
+    continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
+  }
+  // Back in Java: use no oops, DON'T safepoint
+
+  // Now check to see if the nmethod we were called from is now deoptimized.
+  // If so we must return to the deopt blob and deoptimize the nmethod
+  if (nm != NULL && caller_is_deopted()) {
+    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
+  }
+
+  assert(continuation != NULL, "no handler found");
+  return continuation;
+}
+
+JRT_ENTRY(void, GraalRuntime::graal_create_null_exception(JavaThread* thread))
+  thread->set_vm_result(Exceptions::new_exception(thread, vmSymbols::java_lang_NullPointerException(), NULL)());
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_create_out_of_bounds_exception(JavaThread* thread, jint index))
+  char message[jintAsStringSize];
+  sprintf(message, "%d", index);
+  thread->set_vm_result(Exceptions::new_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message)());
+JRT_END
+
+JRT_ENTRY_NO_ASYNC(void, GraalRuntime::graal_monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock))
+  if (TraceGraal >= 3) {
+    char type[O_BUFLEN];
+    obj->klass()->name()->as_C_string(type, O_BUFLEN);
+    markOop mark = obj->mark();
+    tty->print_cr("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), obj, type, mark, lock);
+    tty->flush();
+  }
+#ifdef ASSERT
+  if (PrintBiasedLockingStatistics) {
+    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
+  }
+#endif
+  Handle h_obj(thread, obj);
+  assert(h_obj()->is_oop(), "must be NULL or an object");
+  if (UseBiasedLocking) {
+    // Retry fast entry if bias is revoked to avoid unnecessary inflation
+    ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
+  } else {
+    if (UseFastLocking) {
+      // When using fast locking, the compiled code has already tried the fast case
+      ObjectSynchronizer::slow_enter(h_obj, lock, THREAD);
+    } else {
+      ObjectSynchronizer::fast_enter(h_obj, lock, false, THREAD);
+    }
+  }
+  if (TraceGraal >= 3) {
+    tty->print_cr("%s: exiting locking slow with obj=" INTPTR_FORMAT, thread->name(), obj);
+  }
+JRT_END
+
+
+JRT_LEAF(void, GraalRuntime::graal_monitorexit(JavaThread* thread, oopDesc* obj, BasicLock* lock))
+  assert(thread == JavaThread::current(), "threads must correspond");
+  assert(thread->last_Java_sp(), "last_Java_sp must be set");
+  // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
+  EXCEPTION_MARK;
+
+#ifdef ASSERT
+  if (!obj->is_oop()) {
+    ResetNoHandleMark rhm;
+    nmethod* method = thread->last_frame().cb()->as_nmethod_or_null();
+    if (method != NULL) {
+      tty->print_cr("ERROR in monitorexit in method %s wrong obj " INTPTR_FORMAT, method->name(), obj);
+    }
+    thread->print_stack_on(tty);
+    assert(false, "invalid lock object pointer detected");
+  }
+#endif
+
+  if (UseFastLocking) {
+    // When using fast locking, the compiled code has already tried the fast case
+    ObjectSynchronizer::slow_exit(obj, lock, THREAD);
+  } else {
+    ObjectSynchronizer::fast_exit(obj, lock, THREAD);
+  }
+  if (TraceGraal >= 3) {
+    char type[O_BUFLEN];
+    obj->klass()->name()->as_C_string(type, O_BUFLEN);
+    tty->print_cr("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), obj, type, obj->mark(), lock);
+    tty->flush();
+  }
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_log_object(JavaThread* thread, oop obj, jint flags))
+  bool string =  mask_bits_are_true(flags, LOG_OBJECT_STRING);
+  bool address = mask_bits_are_true(flags, LOG_OBJECT_ADDRESS);
+  bool newline = mask_bits_are_true(flags, LOG_OBJECT_NEWLINE);
+  if (!string) {
+    if (!address && obj->is_oop_or_null(true)) {
+      char buf[O_BUFLEN];
+      tty->print("%s@%p", obj->klass()->name()->as_C_string(buf, O_BUFLEN), obj);
+    } else {
+      tty->print("%p", obj);
+    }
+  } else {
+    ResourceMark rm;
+    assert(obj != NULL && java_lang_String::is_instance(obj), "must be");
+    char *buf = java_lang_String::as_utf8_string(obj);
+    tty->print("%s", buf);
+  }
+  if (newline) {
+    tty->cr();
+  }
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_vm_error(JavaThread* thread, oop where, oop format, jlong value))
+  ResourceMark rm;
+  assert(where == NULL || java_lang_String::is_instance(where), "must be");
+  const char *error_msg = where == NULL ? "<internal Graal error>" : java_lang_String::as_utf8_string(where);
+  char *detail_msg = NULL;
+  if (format != NULL) {
+    const char* buf = java_lang_String::as_utf8_string(format);
+    size_t detail_msg_length = strlen(buf) * 2;
+    detail_msg = (char *) NEW_RESOURCE_ARRAY(u_char, detail_msg_length);
+    jio_snprintf(detail_msg, detail_msg_length, buf, value);
+  }
+  report_vm_error(__FILE__, __LINE__, error_msg, detail_msg);
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_log_printf(JavaThread* thread, oop format, jlong val))
+  ResourceMark rm;
+  assert(format != NULL && java_lang_String::is_instance(format), "must be");
+  char *buf = java_lang_String::as_utf8_string(format);
+  tty->print(buf, val);
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline))
+  union {
+      jlong l;
+      jdouble d;
+      jfloat f;
+  } uu;
+  uu.l = value;
+  switch (typeChar) {
+    case 'z': tty->print(value == 0 ? "false" : "true"); break;
+    case 'b': tty->print("%d", (jbyte) value); break;
+    case 'c': tty->print("%c", (jchar) value); break;
+    case 's': tty->print("%d", (jshort) value); break;
+    case 'i': tty->print("%d", (jint) value); break;
+    case 'f': tty->print("%f", uu.f); break;
+    case 'j': tty->print(INT64_FORMAT, value); break;
+    case 'd': tty->print("%lf", uu.d); break;
+    default: assert(false, "unknown typeChar"); break;
+  }
+  if (newline) {
+    tty->cr();
+  }
+JRT_END
 
 // JVM_InitializeGraalRuntime
 JVM_ENTRY(jobject, JVM_InitializeGraalRuntime(JNIEnv *env, jclass graalclass))
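
Aside: the graal_log_primitive entry point added above receives every primitive as a raw jlong and relies on a union to recover float and double values from those bits. The following standalone sketch (illustrative only, not part of this changeset) shows the same bit-reinterpretation that the union in the runtime call depends on:

#include <cstdio>
#include <cstring>
#include <cstdint>

int main() {
  // What a caller would pass as 'value': the raw bits of a double in a 64-bit integer.
  double d = 3.25;
  int64_t bits;
  memcpy(&bits, &d, sizeof(bits));

  // Same trick as in graal_log_primitive: reinterpret the jlong bits via a union.
  union { int64_t l; double d; float f; } uu;
  uu.l = bits;
  printf("%f\n", uu.d);   // prints 3.250000
  return 0;
}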
--- a/src/share/vm/graal/graalRuntime.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/graal/graalRuntime.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -24,4 +24,144 @@
 #ifndef SHARE_VM_GRAAL_GRAAL_RUNTIME_HPP
 #define SHARE_VM_GRAAL_GRAAL_RUNTIME_HPP
 
+#include "code/stubs.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/deoptimization.hpp"
+
+// A GraalStubAssembler is a MacroAssembler with extra functionality for runtime
+// stubs. Currently it 'knows' some stub information. Eventually, the information
+// may be set automatically or asserted when using specialized
+// GraalStubAssembler functions.
+
+class GraalStubAssembler: public MacroAssembler {
+ private:
+  const char* _name;
+  bool        _must_gc_arguments;
+  int         _frame_size;
+  int         _num_rt_args;
+  int         _stub_id;
+
+ public:
+  // creation
+  GraalStubAssembler(CodeBuffer* code, const char * name, int stub_id);
+  void set_info(const char* name, bool must_gc_arguments);
+
+  void set_frame_size(int size);
+  void set_num_rt_args(int args);
+
+  // accessors
+  const char* name() const                       { return _name; }
+  bool  must_gc_arguments() const                { return _must_gc_arguments; }
+  int frame_size() const                         { return _frame_size; }
+  int num_rt_args() const                        { return _num_rt_args; }
+  int stub_id() const                            { return _stub_id; }
+
+  void verify_stack_oop(int offset) PRODUCT_RETURN;
+  void verify_not_null_oop(Register r)  PRODUCT_RETURN;
+
+  // runtime calls (return offset of call to be used by GC map)
+  int call_RT(Register oop_result1, Register metadata_result, address entry, int args_size = 0);
+  int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1);
+  int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2);
+  int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3);
+};
+
+// set frame size and return address offset to these values in blobs
+// (if the compiled frame uses ebp as link pointer on IA; otherwise,
+// the frame size must be fixed)
+enum {
+  no_frame_size            = -1
+};
+
+// Holds all assembly stubs and VM
+// runtime routines needed by code
+// generated by Graal.
+#define GRAAL_STUBS(stub, last_entry) \
+  stub(graal_register_finalizer)      \
+  stub(graal_new_instance)            \
+  stub(graal_new_type_array)          \
+  stub(graal_new_object_array)        \
+  stub(graal_new_multi_array)         \
+  stub(graal_handle_exception_nofpu) /* optimized version that does not preserve fpu registers */ \
+  stub(graal_slow_subtype_check)      \
+  stub(graal_unwind_exception_call)   \
+  stub(graal_OSR_migration_end)       \
+  stub(graal_arithmetic_frem)         \
+  stub(graal_arithmetic_drem)         \
+  stub(graal_monitorenter)            \
+  stub(graal_monitorexit)             \
+  stub(graal_verify_oop)              \
+  stub(graal_vm_error)                \
+  stub(graal_set_deopt_info)          \
+  stub(graal_create_null_pointer_exception) \
+  stub(graal_create_out_of_bounds_exception) \
+  stub(graal_log_object)              \
+  stub(graal_log_printf)              \
+  stub(graal_log_primitive)           \
+  last_entry(number_of_ids)
+
+#define DECLARE_STUB_ID(x)       x ## _id ,
+#define DECLARE_LAST_STUB_ID(x)  x
+#define STUB_NAME(x)             #x " GraalRuntime stub",
+#define LAST_STUB_NAME(x)        #x " GraalRuntime stub"
+
+class GraalRuntime: public AllStatic {
+  friend class VMStructs;
+
+ public:
+  enum StubID {
+    GRAAL_STUBS(DECLARE_STUB_ID, DECLARE_LAST_STUB_ID)
+  };
+
+ private:
+  static CodeBlob* _blobs[number_of_ids];
+  static const char* _blob_names[];
+
+  // stub generation
+  static void       generate_blob_for(BufferBlob* blob, StubID id);
+  static OopMapSet* generate_code_for(StubID id, GraalStubAssembler* sasm);
+  static OopMapSet* generate_handle_exception(StubID id, GraalStubAssembler* sasm);
+  static void       generate_unwind_exception(GraalStubAssembler *sasm);
+
+  static OopMapSet* generate_stub_call(GraalStubAssembler* sasm, Register result, address entry,
+                                       Register arg1 = noreg, Register arg2 = noreg, Register arg3 = noreg);
+
+  // runtime entry points
+  static void new_instance    (JavaThread* thread, Klass* klass);
+  static void new_type_array  (JavaThread* thread, Klass* klass, jint length);
+  static void new_object_array(JavaThread* thread, Klass* klass, jint length);
+  static void new_multi_array (JavaThread* thread, Klass* klass, int rank, jint* dims);
+
+  static void unimplemented_entry   (JavaThread* thread, StubID id);
+
+  static address exception_handler_for_pc(JavaThread* thread);
+
+  static void graal_create_null_exception(JavaThread* thread);
+  static void graal_create_out_of_bounds_exception(JavaThread* thread, jint index);
+  static void graal_monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock);
+  static void graal_monitorexit (JavaThread* thread, oopDesc* obj, BasicLock* lock);
+  static void graal_vm_error(JavaThread* thread, oop where, oop format, jlong value);
+  static void graal_log_printf(JavaThread* thread, oop format, jlong value);
+  static void graal_log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline);
+
+  // Note: Must be kept in sync with constants in com.oracle.graal.snippets.Log
+  enum {
+    LOG_OBJECT_NEWLINE = 0x01,
+    LOG_OBJECT_STRING  = 0x02,
+    LOG_OBJECT_ADDRESS = 0x04
+  };
+  static void graal_log_object(JavaThread* thread, oop msg, jint flags);
+
+ public:
+  // initialization
+  static void initialize(BufferBlob* blob);
+
+  // stubs
+  static CodeBlob* blob_for (StubID id);
+  static address   entry_for(StubID id)          { return blob_for(id)->code_begin(); }
+  static const char* name_for (StubID id);
+  static const char* name_for_address(address entry);
+};
+
 #endif // SHARE_VM_GRAAL_GRAAL_RUNTIME_HPP
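
Aside: GRAAL_STUBS above is an X-macro, expanded once with DECLARE_STUB_ID/DECLARE_LAST_STUB_ID to produce the StubID enum and again with STUB_NAME/LAST_STUB_NAME to produce the parallel name table used by name_for(). A minimal standalone sketch of the same pattern (illustrative names only, not the HotSpot sources):

#include <cstdio>

// Single list of stubs; 'stub' and 'last_entry' are expansion hooks.
#define DEMO_STUBS(stub, last_entry) \
  stub(graal_monitorenter)           \
  stub(graal_monitorexit)            \
  last_entry(number_of_ids)

#define DECLARE_STUB_ID(x)      x ## _id ,
#define DECLARE_LAST_STUB_ID(x) x
#define STUB_NAME(x)            #x " GraalRuntime stub",
#define LAST_STUB_NAME(x)       #x " GraalRuntime stub"

// First expansion: the enum of stub ids.
enum StubID {
  DEMO_STUBS(DECLARE_STUB_ID, DECLARE_LAST_STUB_ID)
};

// Second expansion: a parallel table of human-readable names, indexed by StubID.
static const char* blob_names[] = {
  DEMO_STUBS(STUB_NAME, LAST_STUB_NAME)
};

int main() {
  printf("%d stubs; id %d is \"%s\"\n",
         number_of_ids, graal_monitorexit_id, blob_names[graal_monitorexit_id]);
  return 0;
}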
--- a/src/share/vm/interpreter/interpreter.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/interpreter/interpreter.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -385,7 +385,8 @@
 address AbstractInterpreter::deopt_reexecute_entry(Method* method, address bcp) {
   assert(method->contains(bcp), "just checkin'");
   Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
+
   if(code == Bytecodes::_athrow ) {
     return Interpreter::rethrow_exception_entry();
   }
@@ -451,7 +452,8 @@
     case Bytecodes::_getstatic :
     case Bytecodes::_putstatic :
     case Bytecodes::_aastore   :
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
+
     //special case of reexecution
     case Bytecodes::_athrow    :
 #endif
--- a/src/share/vm/memory/allocation.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/memory/allocation.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -34,6 +34,9 @@
 #ifdef COMPILER2
 #include "opto/c2_globals.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalGlobals.hpp"
+#endif
 
 #include <new>
 
--- a/src/share/vm/oops/klass.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/oops/klass.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -256,7 +256,7 @@
   void set_java_mirror(oop m) { klass_oop_store(&_java_mirror, m); }
 
 #ifdef GRAAL
-  // graal mirror
+  // Graal mirror
   oop graal_mirror() const               { return _graal_mirror; }
   void set_graal_mirror(oop m)           { oop_store((oop*) &_graal_mirror, m); }
 #endif
--- a/src/share/vm/precompiled/precompiled.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/precompiled/precompiled.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -285,6 +285,9 @@
 # include "c1/c1_ValueType.hpp"
 # include "c1/c1_globals.hpp"
 #endif // COMPILER1
+#ifdef GRAAL
+# include "graal/graalGlobals.hpp"
+#endif // GRAAL
 #ifndef SERIALGC
 # include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
 # include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
--- a/src/share/vm/prims/jvm.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/prims/jvm.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -372,6 +372,8 @@
     const char* compiler_name = "HotSpot " CSIZE "Client Compiler";
 #elif defined(COMPILER2)
     const char* compiler_name = "HotSpot " CSIZE "Server Compiler";
+#elif defined(GRAAL)
+    const char* compiler_name = "HotSpot " CSIZE "Graal Compiler";
 #else
     const char* compiler_name = "";
 #endif // compilers
--- a/src/share/vm/runtime/arguments.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -1515,7 +1515,7 @@
 
 void Arguments::set_g1_gc_flags() {
   assert(UseG1GC, "Error");
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
   FastTLABRefill = false;
 #endif
   FLAG_SET_DEFAULT(ParallelGCThreads,
@@ -2034,16 +2034,26 @@
     jio_fprintf(defaultStream::error_stream(),
                     "CompressedOops are not supported in Graal at the moment\n");
         status = false;
+  } else {
+    // This prevents the flag being set to true by set_ergonomics_flags()
+    FLAG_SET_CMDLINE(bool, UseCompressedOops, false);
   }
+
   if (UseCompressedKlassPointers) {
     jio_fprintf(defaultStream::error_stream(),
                     "UseCompressedKlassPointers are not supported in Graal at the moment\n");
         status = false;
+  } else {
+    // This prevents the flag being set to true by set_ergonomics_flags()
+    FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false);
   }
   if (UseG1GC) {
     jio_fprintf(defaultStream::error_stream(),
                         "G1 is not supported in Graal at the moment\n");
         status = false;
+  } else {
+    // This prevents the flag being set to true by set_ergonomics_flags()
+    FLAG_SET_CMDLINE(bool, UseG1GC, false);
   }
 
   if (!ScavengeRootsInCode) {
@@ -2957,9 +2967,9 @@
     }
 #ifdef GRAAL
     else if (match_option(option, "-G:", &tail)) { // -G:XXX
-      // Option for the graal compiler.
+      // Option for the Graal compiler.
       if (PrintVMOptions) {
-        tty->print_cr("graal option %s", tail);
+        tty->print_cr("Graal option %s", tail);
       }
       Arguments::add_graal_arg(tail);
 
@@ -3432,7 +3442,7 @@
   // which are subtlely different from each other but neither works with
   // biased locking.
   if (UseHeavyMonitors
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
       || !UseFastLocking
 #endif // COMPILER1
     ) {
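
Aside: the arguments.cpp hunk above routes any option starting with "-G:" to the Graal compiler via Arguments::add_graal_arg instead of treating it as a VM flag. A rough standalone sketch of that prefix handling (helper names here are illustrative, not the HotSpot ones):

#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Illustrative stand-in for match_option: if 'option' starts with 'prefix',
// point 'tail' at the remainder and report a match.
static bool match_prefix(const char* option, const char* prefix, const char** tail) {
  size_t len = strlen(prefix);
  if (strncmp(option, prefix, len) == 0) {
    *tail = option + len;
    return true;
  }
  return false;
}

int main(int argc, char** argv) {
  std::vector<std::string> graal_args;   // stand-in for _graal_args_array
  for (int i = 1; i < argc; i++) {
    const char* tail = NULL;
    if (match_prefix(argv[i], "-G:", &tail)) {
      graal_args.push_back(tail);        // what add_graal_arg would record
    }
  }
  for (size_t i = 0; i < graal_args.size(); i++) {
    printf("Graal option %s\n", graal_args[i].c_str());
  }
  return 0;
}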
--- a/src/share/vm/runtime/arguments.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/runtime/arguments.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -229,7 +229,7 @@
   static char** _jvm_args_array;
   static int    _num_jvm_args;
 #ifdef GRAAL
-  // an array containing all graal arguments specified in the command line
+  // an array containing all Graal arguments specified in the command line
   static char** _graal_args_array;
   static int    _num_graal_args;
 #endif
--- a/src/share/vm/runtime/compilationPolicy.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/runtime/compilationPolicy.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -188,6 +188,7 @@
 #endif
 
 #ifdef COMPILER1
+  GRAAL_ONLY(ShouldNotReachHere();)
   if (is_c1_compile(comp_level)) {
     return _compiler_count;
   } else {
--- a/src/share/vm/runtime/globals.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/runtime/globals.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -36,6 +36,9 @@
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalGlobals.hpp"
+#endif
 #ifdef COMPILER2
 #include "opto/c2_globals.hpp"
 #endif
@@ -215,6 +218,18 @@
   #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C1 notproduct}", DEFAULT },
 #endif
 
+#define GRAAL_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Graal product}", DEFAULT },
+#define GRAAL_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Graal pd product}", DEFAULT },
+#ifdef PRODUCT
+  #define GRAAL_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+  #define GRAAL_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
+  #define GRAAL_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+#else
+  #define GRAAL_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Graal}", DEFAULT },
+  #define GRAAL_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{Graal pd}", DEFAULT },
+  #define GRAAL_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Graal notproduct}", DEFAULT },
+#endif
+
 #define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 product}", DEFAULT },
 #define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 pd product}", DEFAULT },
 #define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 diagnostic}", DEFAULT },
@@ -262,6 +277,9 @@
 #ifdef COMPILER1
  C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
+#ifdef GRAAL
+ GRAAL_FLAGS(GRAAL_DEVELOP_FLAG_STRUCT, GRAAL_PD_DEVELOP_FLAG_STRUCT, GRAAL_PRODUCT_FLAG_STRUCT, GRAAL_PD_PRODUCT_FLAG_STRUCT, GRAAL_NOTPRODUCT_FLAG_STRUCT)
+#endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
 #endif
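
Aside: the GRAAL_*_FLAG_STRUCT macros added above mirror the existing C1/C2 ones: a single GRAAL_FLAGS list is expanded once (elsewhere) to declare the flag variables and again here to build entries in the flag descriptor table. A simplified standalone sketch of that double expansion, with an illustrative flag list and a reduced stand-in for the Flag struct:

#include <cstdio>

// Single list of flags; 'product' is the expansion hook.
#define DEMO_FLAGS(product)                                 \
  product(bool, TraceDemo,     false, "trace demo events")  \
  product(int,  DemoThreshold, 10,    "threshold for the demo")

// First expansion: define the globals with their default values.
#define DEFINE_FLAG(type, name, value, doc) type name = value;
DEMO_FLAGS(DEFINE_FLAG)

// Second expansion: build a descriptor table, one entry per flag.
struct FlagDesc { const char* type; const char* name; void* addr; const char* doc; };
#define FLAG_STRUCT(type, name, value, doc) { #type, #name, &name, doc },
static FlagDesc flag_table[] = {
  DEMO_FLAGS(FLAG_STRUCT)
};

int main() {
  for (size_t i = 0; i < sizeof(flag_table) / sizeof(flag_table[0]); i++) {
    printf("%s %s: %s\n", flag_table[i].type, flag_table[i].name, flag_table[i].doc);
  }
  return 0;
}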
--- a/src/share/vm/runtime/globals.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/runtime/globals.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -120,6 +120,20 @@
 # include "c1_globals_bsd.hpp"
 #endif
 #endif
+#ifdef GRAAL
+#ifdef TARGET_ARCH_x86
+# include "graalGlobals_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "graalGlobals_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "graalGlobals_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "graalGlobals_ppc.hpp"
+#endif
+#endif // GRAAL
 #ifdef COMPILER2
 #ifdef TARGET_ARCH_x86
 # include "c2_globals_x86.hpp"
@@ -149,7 +163,7 @@
 #endif
 #endif
 
-#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK)
+#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !defined(GRAAL)
 define_pd_global(bool, BackgroundCompilation,        false);
 define_pd_global(bool, UseTLAB,                      false);
 define_pd_global(bool, CICompileOSR,                 false);
--- a/src/share/vm/runtime/globals_extension.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/runtime/globals_extension.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -66,6 +66,18 @@
   #define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
 #endif
 
+#define GRAAL_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
+#define GRAAL_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
+#ifdef PRODUCT
+  #define GRAAL_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
+  #define GRAAL_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
+  #define GRAAL_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+#else
+  #define GRAAL_DEVELOP_FLAG_MEMBER(type, name, value, doc)       FLAG_MEMBER(name),
+  #define GRAAL_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           FLAG_MEMBER(name),
+  #define GRAAL_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
+#endif
+
 #define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
 #define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
@@ -100,6 +112,9 @@
 #ifdef COMPILER1
  C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
+#ifdef GRAAL
+ GRAAL_FLAGS(GRAAL_DEVELOP_FLAG_MEMBER, GRAAL_PD_DEVELOP_FLAG_MEMBER, GRAAL_PRODUCT_FLAG_MEMBER, GRAAL_PD_PRODUCT_FLAG_MEMBER, GRAAL_NOTPRODUCT_FLAG_MEMBER)
+#endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
 #endif
@@ -139,6 +154,17 @@
   #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
   #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
 #endif
+#define GRAAL_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
+#define GRAAL_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
+#ifdef PRODUCT
+  #define GRAAL_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
+  #define GRAAL_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
+  #define GRAAL_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+#else
+  #define GRAAL_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       FLAG_MEMBER_WITH_TYPE(name,type),
+  #define GRAAL_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
+  #define GRAAL_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
+#endif
 #ifdef _LP64
 #define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
 #else
@@ -205,6 +231,13 @@
           C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
+#ifdef GRAAL
+ GRAAL_FLAGS(GRAAL_DEVELOP_FLAG_MEMBER_WITH_TYPE,
+          GRAAL_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
+          GRAAL_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+          GRAAL_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+          GRAAL_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
+#endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER_WITH_TYPE,
           C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
--- a/src/share/vm/runtime/safepoint.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/runtime/safepoint.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -90,6 +90,9 @@
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalGlobals.hpp"
+#endif
 
 // --------------------------------------------------------------------------------------------------
 // Implementation of Safepoint begin/end
--- a/src/share/vm/runtime/sharedRuntime.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -892,7 +892,7 @@
 #endif
 #ifdef GRAAL
         if (TraceSignals) {
-          tty->print_cr("graal implicit div0");
+          tty->print_cr("Graal implicit div0");
         }
         target_pc = deoptimize_for_implicit_exception(thread, pc, nm, Deoptimization::Reason_div0_check);
 #else
--- a/src/share/vm/runtime/thread.cpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/runtime/thread.cpp	Mon Dec 03 15:32:17 2012 +0100
@@ -3680,7 +3680,7 @@
   }
 
   // initialize compiler(s)
-#if defined(COMPILER1) || defined(COMPILER2)
+#if defined(COMPILER1) || defined(COMPILER2) || defined(GRAAL)
   CompileBroker::compilation_init();
 #endif
 
--- a/src/share/vm/utilities/globalDefinitions.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -798,16 +798,16 @@
 
 #if defined(COMPILER2) || defined(SHARK)
   CompLevel_highest_tier      = CompLevel_full_optimization,  // pure C2 and tiered
-#elif defined(COMPILER1)
-  CompLevel_highest_tier      = CompLevel_simple,             // pure C1
+#elif defined(COMPILER1) || defined(GRAAL)
+  CompLevel_highest_tier      = CompLevel_simple,             // pure C1 or Graal
 #else
   CompLevel_highest_tier      = CompLevel_none,
 #endif
 
 #if defined(TIERED)
   CompLevel_initial_compile   = CompLevel_full_profile        // tiered
-#elif defined(COMPILER1)
-  CompLevel_initial_compile   = CompLevel_simple              // pure C1
+#elif defined(COMPILER1) || defined(GRAAL)
+  CompLevel_initial_compile   = CompLevel_simple              // pure C1 or Graal
 #elif defined(COMPILER2) || defined(SHARK)
   CompLevel_initial_compile   = CompLevel_full_optimization   // pure C2
 #else
--- a/src/share/vm/utilities/top.hpp	Mon Dec 03 13:56:13 2012 +0100
+++ b/src/share/vm/utilities/top.hpp	Mon Dec 03 15:32:17 2012 +0100
@@ -39,6 +39,9 @@
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalGlobals.hpp"
+#endif
 #ifdef COMPILER2
 #include "opto/c2_globals.hpp"
 #endif