diff src/cpu/x86/vm/c1_LIRAssembler_x86.cpp @ 1295:3cf667df43ef

6919934: JSR 292 needs to support x86 C1
Summary: This implements JSR 292 support for C1 x86.
Reviewed-by: never, jrose, kvn
author twisti
date Tue, 09 Mar 2010 20:16:19 +0100
parents 7b4415a18c8a
children c466efa608d5
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon Mar 08 04:46:30 2010 -0800
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Mar 09 20:16:19 2010 +0100
@@ -436,40 +436,18 @@
 
   int offset = code_offset();
 
-  // if the method does not have an exception handler, then there is
-  // no reason to search for one
-  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
-    // the exception oop and pc are in rax, and rdx
-    // no other registers need to be preserved, so invalidate them
-    __ invalidate_registers(false, true, true, false, true, true);
-
-    // check that there is really an exception
-    __ verify_not_null_oop(rax);
-
-    // search an exception handler (rax: exception oop, rdx: throwing pc)
-    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
-
-    // if the call returns here, then the exception handler for particular
-    // exception doesn't exist -> unwind activation and forward exception to caller
-  }
-
-  // the exception oop is in rax,
+  // the exception oop and pc are in rax, and rdx
   // no other registers need to be preserved, so invalidate them
-  __ invalidate_registers(false, true, true, true, true, true);
+  __ invalidate_registers(false, true, true, false, true, true);
 
   // check that there is really an exception
   __ verify_not_null_oop(rax);
 
-  // unlock the receiver/klass if necessary
-  // rax,: exception
-  ciMethod* method = compilation()->method();
-  if (method->is_synchronized() && GenerateSynchronizationCode) {
-    monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax);
-  }
-
-  // unwind activation and forward exception to caller
-  // rax,: exception
-  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+  // search an exception handler (rax: exception oop, rdx: throwing pc)
+  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
+
+  __ stop("should not reach here");
+
   assert(code_offset() - offset <= exception_handler_size, "overflow");
   __ end_a_stub();
 
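This hunk rewrites the per-method exception handler stub (LIR_Assembler::emit_exception_handler, judging by the exception_handler_size assert). Instead of searching for a handler only when the compilation is known to have one, and otherwise unlocking a synchronized receiver and unwinding to the caller, the stub now always calls the Runtime1 handle_exception_nofpu_id entry, which is not expected to return here (hence the __ stop). The unlock and unwind logic disappears from this stub; the unwind emission itself is reworked in the last hunk below. Merging the '+' and context lines, the emitted stub after this change reads roughly:

  int offset = code_offset();

  // the exception oop and pc are in rax, and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));

  __ stop("should not reach here");

  assert(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();
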
@@ -495,8 +473,10 @@
 
   int offset = code_offset();
   InternalAddress here(__ pc());
+
   __ pushptr(here.addr());
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
   __ end_a_stub();
 
@@ -593,7 +573,7 @@
   }
 
   // Pop the stack before the safepoint code
-  __ leave();
+  __ remove_frame(initial_frame_size_in_bytes());
 
   bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 
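The return sequence stops using leave, which rebuilds rsp from rbp (x86 leave is equivalent to mov rsp, rbp followed by pop rbp), and instead pops the frame by its statically known size. The likely motivation, given the preserve_SP/restore_SP pair added further down, is that rbp gets repurposed to carry the caller's rsp across method handle call sites, so the epilogue can no longer assume rbp is still the frame pointer. A minimal sketch of a size-based epilogue, under the assumption that the frame size covers everything above the saved rbp slot (hypothetical helper, not the actual remove_frame implementation):

// Hypothetical illustration only: an epilogue that never reads rbp.
static void epilogue_by_size(MacroAssembler* masm, int frame_size_in_bytes) {
  masm->addptr(rsp, frame_size_in_bytes);  // pop everything above the saved rbp slot
  masm->pop(rbp);                          // restore the caller's rbp
}
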
@@ -2738,6 +2718,7 @@
     switch (code) {
       case lir_static_call:
       case lir_optvirtual_call:
+      case lir_dynamic_call:
         offset += NativeCall::displacement_offset;
         break;
       case lir_icvirtual_call:
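The call alignment switch now treats lir_dynamic_call, the invokedynamic call shape added by JSR 292, like static and opt-virtual calls: the call is padded so that its 32-bit displacement starts word aligned and can be patched atomically on MP systems, which is the same (offset + NativeCall::displacement_offset) % BytesPerWord == 0 condition asserted in call() and ic_call() below. A sketch of the padding idea, not the exact HotSpot code:

// Pad with nops until the displacement of the upcoming call instruction
// would start on a word boundary, so later patching of the 4-byte
// displacement is atomic.
while ((masm->offset() + NativeCall::displacement_offset) % BytesPerWord != 0) {
  masm->nop();
}
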
@@ -2753,30 +2734,41 @@
 }
 
 
-void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) {
+void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
   assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
-  __ call(AddressLiteral(entry, rtype));
-  add_call_info(code_offset(), info);
+  __ call(AddressLiteral(op->addr(), rtype));
+  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
 }
 
 
-void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
+void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
   RelocationHolder rh = virtual_call_Relocation::spec(pc());
   __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
   assert(!os::is_MP() ||
          (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
-  __ call(AddressLiteral(entry, rh));
-  add_call_info(code_offset(), info);
+  __ call(AddressLiteral(op->addr(), rh));
+  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
 }
 
 
 /* Currently, vtable-dispatch is only enabled for sparc platforms */
-void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
+void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
   ShouldNotReachHere();
 }
 
+
+void LIR_Assembler::preserve_SP() {
+  __ movptr(rbp, rsp);
+}
+
+
+void LIR_Assembler::restore_SP() {
+  __ movptr(rsp, rbp);
+}
+
+
 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
   address stub = __ start_a_stub(call_stub_size);
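The direct call and inline cache call emitters now take the whole LIR_OpJavaCall instead of a bare entry address, so the recorded debug info can include op->is_method_handle_invoke(); together with the lir_dynamic_call case above, this is the x86 C1 plumbing for JSR 292 call sites. The new preserve_SP()/restore_SP() pair copies rsp into rbp before such a call and back afterwards, presumably because method handle adapters may adjust the stack pointer while reshuffling arguments. Merging the '+' and context lines, the reworked direct call emitter and the new helpers read:

void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
}

void LIR_Assembler::preserve_SP() {
  __ movptr(rbp, rsp);   // stash the current rsp in rbp before the invoke
}

void LIR_Assembler::restore_SP() {
  __ movptr(rsp, rbp);   // bring rsp back; the callee may have moved it
}

ic_call() changes the same way, and vtable_call() keeps its ShouldNotReachHere() since vtable dispatch remains sparc-only, as the comment above notes.
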
@@ -2829,10 +2821,12 @@
     } else {
       unwind_id = Runtime1::handle_exception_nofpu_id;
     }
+    __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
   } else {
-    unwind_id = Runtime1::unwind_exception_id;
+    // remove the activation
+    __ remove_frame(initial_frame_size_in_bytes());
+    __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
   }
-  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
 
   // enough room for two byte trap
   __ nop();
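In the exception throw/unwind emitter the handle-exception cases still call into the Runtime1 stub, but the unwind case no longer shares that call: it removes the activation with the same size-based remove_frame used in the return path and then jumps, rather than calls, to unwind_exception_id with the frame already popped. Merging the '+' and context lines, the tail of this code becomes:

    } else {
      unwind_id = Runtime1::handle_exception_nofpu_id;
    }
    __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
  } else {
    // remove the activation
    __ remove_frame(initial_frame_size_in_bytes());
    __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
  }

  // enough room for two byte trap
  __ nop();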