diff src/cpu/x86/vm/sharedRuntime_x86_64.cpp @ 21559:be896a1983c0

recast all Graal native code as JVMCI code (JBS:GRAAL-53)
author Doug Simon <doug.simon@oracle.com>
date Thu, 28 May 2015 15:36:48 +0200
parents 2e35a4ea22ac
children b3d5463a8362
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed May 27 13:43:27 2015 +0200
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu May 28 15:36:48 2015 +0200
@@ -40,8 +40,8 @@
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
-#ifdef GRAAL
-#include "graal/graalJavaAccess.hpp"
+#ifdef JVMCI
+#include "jvmci/jvmciJavaAccess.hpp"
 #endif
 
 #define __ masm->
@@ -73,7 +73,7 @@
 #define DEF_YMM_HI_OFFS(regnum) ymm_hi ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt
   enum layout {
     fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
-#if defined(COMPILER2) || defined(GRAAL)
+#if defined(COMPILER2) || defined(JVMCI)
     ymm_off       = fpu_state_off,            // offset in fxsave save area
     DEF_YMM_HI_OFFS(0),
     DEF_YMM_HI_OFFS(1),
@@ -182,7 +182,7 @@
   __ enter();          // rsp becomes 16-byte aligned here
   __ push_CPU_state(); // Push a multiple of 16 bytes
 
-#if defined(COMPILER2) || defined(GRAAL)
+#if defined(COMPILER2) || defined(JVMCI)
   __ subptr(rsp, 256); // Save upper half of YMM registers
   if (save_vectors) {
     assert(UseAVX > 0, "256bit vectors are supported only with AVX");
@@ -206,7 +206,7 @@
     __ vextractf128h(Address(rsp, ymm_hi15_off * BytesPerInt), xmm15);
   }
 #else
-  assert(!save_vectors, "vectors are generated only by C2 and Graal");
+  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
 #endif
   if (frame::arg_reg_save_area_bytes != 0) {
     // Allocate argument register save area
@@ -257,7 +257,7 @@
   map->set_callee_saved(STACK_OFFSET(xmm15_off), xmm15->as_VMReg());
 
 
-#if defined(COMPILER2) || defined(GRAAL)
+#if defined(COMPILER2) || defined(JVMCI)
   if (save_vectors) {
     map->set_callee_saved(STACK_OFFSET(ymm_hi0_off ), xmm0->as_VMReg()->next()->next()->next()->next());
     map->set_callee_saved(STACK_OFFSET(ymm_hi1_off ), xmm1->as_VMReg()->next()->next()->next()->next());
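A note on the oop-map hunk above: HotSpot describes saved registers in 32-bit VMReg stack slots, so naming the upper half of a YMM register takes four next() steps from the XMM base slot (4 x 32 = 128 bits). A minimal standalone sketch of that slot arithmetic (illustrative names, not HotSpot's API):

    #include <stdio.h>

    enum { SLOT_BITS = 32 };                 // one VMReg-style slot is 32 bits

    static int next(int slot) { return slot + 1; }

    int main(void) {
      int xmm0_slot = 0;                     // first slot of xmm0
      int ymm0_hi = next(next(next(next(xmm0_slot))));
      printf("upper half starts at bit %d\n", ymm0_hi * SLOT_BITS); // prints 128
      return 0;
    }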
@@ -321,7 +321,7 @@
     // Pop arg register save area
     __ addptr(rsp, frame::arg_reg_save_area_bytes);
   }
-#if defined(COMPILER2) || defined(GRAAL)
+#if defined(COMPILER2) || defined(JVMCI)
   if (restore_vectors) {
    // Restore upper half of YMM registers.
     assert(UseAVX > 0, "256bit vectors are supported only with AVX");
@@ -345,7 +345,7 @@
   }
   __ addptr(rsp, 256);
 #else
-  assert(!restore_vectors, "vectors are generated only by C2 and Graal");
+  assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
 #endif
   // Recover CPU state
   __ pop_CPU_state();
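The two vector hunks above are a save/restore pair: push_CPU_state() and pop_CPU_state() go through fxsave/fxrstor, which cover only the low 128 bits (XMM) of each 256-bit YMM register, so the upper halves are spilled separately with vextractf128h into the ymm_hi*_off slots and refilled on exit. A standalone sketch of the same round trip using AVX intrinsics (illustrative only, not HotSpot code; compile with -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256i ymm = _mm256_set_epi64x(4, 3, 2, 1);   // stands in for a live ymm

      // "Save": spill the upper 128 bits, as vextractf128h does into the
      // ymm_hi*_off slots of the register-save frame.
      __m128i hi_save = _mm256_extractf128_si256(ymm, 1);

      // An fxsave/fxrstor round trip would preserve only this low half.
      __m128i lo = _mm256_castsi256_si128(ymm);

      // "Restore": reassemble the full 256-bit register from both halves.
      __m256i restored =
          _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi_save, 1);

      long long out[4];
      _mm256_storeu_si256((__m256i*)out, restored);
      printf("%lld %lld %lld %lld\n", out[0], out[1], out[2], out[3]); // 1 2 3 4
      return 0;
    }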
@@ -743,7 +743,7 @@
     __ block_comment("} verify_i2ce ");
   }
 
-#ifdef GRAAL
+#ifdef JVMCI
   if (frame_extension_argument != -1) {
     // The frame_extension_argument is an int that describes the
     // expected amount of argument space in the caller frame.  If that
@@ -814,13 +814,13 @@
   // Pre-load the register-jump target early, to schedule it better.
   __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
 
-#ifdef GRAAL
+#ifdef JVMCI
   // check if this call should be routed towards a specific entry point
-  __ cmpptr(Address(r15_thread, in_bytes(JavaThread::graal_alternate_call_target_offset())), 0);
+  __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
   Label no_alternative_target;
   __ jcc(Assembler::equal, no_alternative_target);
-  __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::graal_alternate_call_target_offset())));
-  __ movptr(Address(r15_thread, in_bytes(JavaThread::graal_alternate_call_target_offset())), 0);
+  __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
+  __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
   __ bind(no_alternative_target);
 #endif
 
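The hunk above is a one-shot call redirect: before jumping to the method's regular compiled entry in r11, the adapter checks a per-thread slot and, if it is non-zero, uses that address instead and clears the slot so the override applies to exactly one invocation. A minimal standalone sketch of the consume-once pattern (names are illustrative, not HotSpot's):

    #include <stdio.h>

    typedef void (*entry_t)(void);

    struct Thread {
      entry_t alternate_call_target;  // plays the role of jvmci_alternate_call_target
    };

    static void default_entry(void)   { puts("default entry"); }
    static void alternate_entry(void) { puts("alternate entry"); }

    static void dispatch(Thread* t, entry_t target) {
      if (t->alternate_call_target != NULL) {  // cmpptr(..., 0)
        target = t->alternate_call_target;     // movptr(r11, ...)
        t->alternate_call_target = NULL;       // movptr(..., 0): consume once
      }
      target();                                // jmp(r11)
    }

    int main(void) {
      Thread t = { alternate_entry };
      dispatch(&t, default_entry);  // routed to the alternate target once
      dispatch(&t, default_entry);  // slot cleared, normal target again
      return 0;
    }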
@@ -3428,8 +3428,8 @@
   __ jmp(cont);
 
   int reexecute_offset = __ pc() - start;
-#if defined(COMPILERGRAAL) && !defined(COMPILER1)
-  // Graal does not use this kind of deoptimization
+#if defined(COMPILERJVMCI) && !defined(COMPILER1)
+  // JVMCI does not use this kind of deoptimization
   __ should_not_reach_here();
 #endif
 
@@ -3442,10 +3442,10 @@
   __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
   __ jmp(cont);
 
-#ifdef GRAAL
+#ifdef JVMCI
   int implicit_exception_uncommon_trap_offset = __ pc() - start;
 
-  __ pushptr(Address(r15_thread, in_bytes(JavaThread::graal_implicit_exception_pc_offset())));
+  __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
 
   int uncommon_trap_offset = __ pc() - start;
 
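The implicit_exception_uncommon_trap entry above serves implicit exceptions (e.g. a trapping null check): the VM records the faulting pc in the jvmci_implicit_exception_pc thread-local, and the stub pushes it so the shared uncommon-trap path that follows treats it as the return address to deoptimize at. A standalone sketch of that record-then-consume handshake (illustrative names only):

    #include <stdio.h>

    struct Thread {
      void* implicit_exception_pc;  // plays the role of jvmci_implicit_exception_pc
    };

    static void uncommon_trap(void* return_pc) {
      printf("deoptimizing at pc=%p\n", return_pc);  // shared trap path
    }

    static void implicit_exception_uncommon_trap(Thread* t) {
      // pushptr(Address(r15_thread, ...)): the recorded pc becomes the
      // "return address" the shared path expects to find on the stack.
      uncommon_trap(t->implicit_exception_pc);
    }

    int main(void) {
      Thread t = { (void*)0x1234 };  // recorded earlier by the VM's trap handling
      implicit_exception_uncommon_trap(&t);
      return 0;
    }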
@@ -3466,7 +3466,7 @@
 
   Label after_fetch_unroll_info_call;
   __ jmp(after_fetch_unroll_info_call);
-#endif // GRAAL
+#endif // JVMCI
 
   int exception_offset = __ pc() - start;
 
@@ -3553,7 +3553,7 @@
 
   __ reset_last_Java_frame(false, false);
 
-#ifdef GRAAL
+#ifdef JVMCI
   __ bind(after_fetch_unroll_info_call);
 #endif
 
@@ -3731,7 +3731,7 @@
 
   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
-#ifdef GRAAL
+#ifdef JVMCI
   _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
   _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
 #endif
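Finally, under JVMCI the DeoptimizationBlob carries two extra entry points. Each offset is computed during code generation as __ pc() - start, i.e. a byte distance from the blob's first instruction, and stored on the blob so the absolute entry address can later be recovered as base + offset. A standalone sketch of that bookkeeping (illustrative, not the real blob API):

    #include <stdio.h>

    struct DeoptBlob {
      unsigned char* base;                          // start of generated code
      int uncommon_trap_offset;                     // JVMCI-only entry
      int implicit_exception_uncommon_trap_offset;  // JVMCI-only entry
    };

    static unsigned char* entry(const DeoptBlob* b, int off) {
      return b->base + off;  // inverse of "__ pc() - start"
    }

    int main(void) {
      static unsigned char code[64];                // pretend code buffer
      DeoptBlob blob = { code, 16, 32 };
      printf("uncommon trap entry: %p\n",
             (void*)entry(&blob, blob.uncommon_trap_offset));
      return 0;
    }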