diff src/cpu/sparc/vm/stubGenerator_sparc.cpp @ 11173:6b0fd0964b87

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/
author Doug Simon <doug.simon@oracle.com>
date Wed, 31 Jul 2013 11:00:54 +0200
parents 40b8c383bc31 980532a806a5
children cefad50507d8
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Jul 30 13:03:28 2013 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Jul 31 11:00:54 2013 +0200
@@ -410,6 +410,51 @@
     return start;
   }
 
+  // Safefetch stubs.
+  void generate_safefetch(const char* name, int size, address* entry,
+                          address* fault_pc, address* continuation_pc) {
+    // safefetch signatures:
+    //   int      SafeFetch32(int*      adr, int      errValue);
+    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
+    //
+    // arguments:
+    //   o0 = adr
+    //   o1 = errValue
+    //
+    // result:
+    //   o0  = *adr or errValue
+
+    StubCodeMark mark(this, "StubRoutines", name);
+
+    // Entry point, pc or function descriptor.
+    __ align(CodeEntryAlignment);
+    *entry = __ pc();
+
+    __ mov(O0, G1);  // g1 = o0
+    __ mov(O1, O0);  // o0 = o1
+    // Load *adr into O0; this load may fault.
+    *fault_pc = __ pc();
+    switch (size) {
+      case 4:
+        // int32_t
+        __ ldsw(G1, 0, O0);  // o0 = [g1]
+        break;
+      case 8:
+        // int64_t
+        __ ldx(G1, 0, O0);   // o0 = [g1]
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+
+    // return errValue or *adr
+    *continuation_pc = __ pc();
+    // By convention with the trap handler we ensure there is a non-CTI
+    // instruction in the trap shadow.
+    __ nop();
+    __ retl();
+    __ delayed()->nop();
+  }
 
   //------------------------------------------------------------------------------------------------------------------------
   // Continuation point for throwing of implicit exceptions that are not handled in
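
This hunk adds the SPARC implementation of the SafeFetch stubs. The generator records two PCs: *fault_pc marks the load that may fault, and *continuation_pc marks where the signal handler resumes; since errValue was already moved into O0 before the load, simply continuing there makes the stub return the error value. A minimal caller-side sketch in C++ (the helper and sentinel are hypothetical; the SafeFetch32 signature is taken from the comment above):

```cpp
// Stub entry generated above: returns *adr, or errValue if the load faults
// and the signal handler redirects execution to the continuation PC.
extern "C" int SafeFetch32(int* adr, int errValue);

// Hypothetical helper: probe a possibly unmapped address without crashing.
static bool probe_readable(int* adr) {
  const int kSentinel = 0x5AFEF00D;  // assumed not to occur in the probed word
  return SafeFetch32(adr, kSentinel) != kSentinel;
}
```

The usual caveat applies: if the probed word genuinely contains the sentinel, the helper reports a false negative, so callers must pick an errValue that cannot legitimately appear at the probed address.
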
@@ -566,7 +611,7 @@
     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
     address start = __ pc();
 
-    __ flush_windows();
+    __ flushw();
     __ retl(false);
     __ delayed()->add( FP, STACK_BIAS, O0 );
     // The returned value must be a stack pointer whose register save area
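
flush_windows() was a compatibility macro that fell back to a flush-windows trap on CPUs lacking the V9 FLUSHW instruction; with V8 support removed, the stub can emit flushw directly, which spills every register window older than the current one to its stack save area so the caller's frames become visible in memory. A rough C++-level illustration, assuming a SPARC V9 target and GCC inline assembly:

```cpp
// Force all parent register windows out to the stack. Afterwards a stack
// walker can read every caller's saved registers from memory. FLUSHW is a
// V9-only instruction, which is why the old macro needed a V8 fallback.
static inline void flush_register_windows() {
  __asm__ __volatile__("flushw" : : : "memory");
}
```
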
@@ -575,67 +620,9 @@
     return start;
   }
 
-  // Helper functions for v8 atomic operations.
-  //
-  void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
-    if (mark_oop_reg == noreg) {
-      address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-    } else {
-      assert(scratch_reg != noreg, "just checking");
-      address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-      __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
-      __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
-    }
-  }
-
-  void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-
-    get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
-    __ set(StubRoutines::Sparc::locked, lock_reg);
-    // Initialize yield counter
-    __ mov(G0,yield_reg);
-
-    __ BIND(retry);
-    __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
-
-    // This code can only be called from inside the VM, this
-    // stub is only invoked from Atomic::add().  We do not
-    // want to use call_VM, because _last_java_sp and such
-    // must already be set.
-    //
-    // Save the regs and make space for a C call
-    __ save(SP, -96, SP);
-    __ save_all_globals_into_locals();
-    BLOCK_COMMENT("call os::naked_sleep");
-    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
-    __ delayed()->nop();
-    __ restore_globals_from_locals();
-    __ restore();
-    // reset the counter
-    __ mov(G0,yield_reg);
-
-    __ BIND(dontyield);
-
-    // try to get lock
-    __ swap(lock_ptr_reg, 0, lock_reg);
-
-    // did we get the lock?
-    __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
-    __ br(Assembler::notEqual, true, Assembler::pn, retry);
-    __ delayed()->add(yield_reg,1,yield_reg);
-
-    // yes, got lock. do the operation here.
-  }
-
-  void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-    __ st(lock_reg, lock_ptr_reg, 0); // unlock
-  }
-
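
The helpers deleted above emulated atomic operations on V8 hardware, which has no cas: callers acquired a global lock word (optionally hashed by the mark oop) with the atomic swap instruction, spinning up to V8AtomicOperationUnderLockSpinCount attempts before parking in os::naked_sleep(). A rough C++ analogue of the removed protocol, with the lock encoding and spin limit as stated assumptions:

```cpp
// Sketch of the removed V8 lock prologue/epilogue. The stub used swap() on a
// lock word; GCC's test-and-set builtin plays the same role here.
static void v8_style_lock(volatile int* lock_word) {
  const int kLocked = 1, kSpinLimit = 100;  // assumed encodings and limit
  int yields = 0;
  while (__sync_lock_test_and_set(lock_word, kLocked) != 0) {  // 0 == unlocked
    if (++yields >= kSpinLimit) {
      // the stub called os::naked_sleep() here to let the lock holder run
      yields = 0;
    }
  }
}

static void v8_style_unlock(volatile int* lock_word) {
  __sync_lock_release(lock_word);  // store the unlocked value (0)
}
```

With every supported SPARC CPU now V9, cas/casx make the lock word, the spin loop, and the sleep path dead code, which is why the rest of this file can drop its cas_under_lock calls and v9_instructions_work() checks.
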
   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O0
   //      dest:           O1
@@ -656,33 +643,14 @@
       __ mov(O0, O3);       // scratch copy of exchange value
       __ ld(O1, 0, O2);     // observe the previous value
       // try to replace O2 with O3
-      __ cas_under_lock(O1, O2, O3,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+      __ cas(O1, O2, O3);
       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 
       __ retl(false);
       __ delayed()->mov(O2, O0);  // report previous value to caller
-
     } else {
-      if (VM_Version::v9_instructions_work()) {
-        __ retl(false);
-        __ delayed()->swap(O1, 0, O0);
-      } else {
-        const Register& lock_reg = O2;
-        const Register& lock_ptr_reg = O3;
-        const Register& yield_reg = O4;
-
-        Label retry;
-        Label dontyield;
-
-        generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        // got the lock, do the swap
-        __ swap(O1, 0, O0);
-
-        generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        __ retl(false);
-        __ delayed()->nop();
-      }
+      __ retl(false);
+      __ delayed()->swap(O1, 0, O0);
     }
 
     return start;
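
generate_atomic_xchg() keeps its two live paths: a CAS retry loop for configurations that prefer a single kind of synchronization instruction, and a plain atomic swap issued in the retl delay slot. What disappears is the third path, where the swap branch had to bracket the operation with the V8 lock emulation above. The surviving CAS loop, transliterated into C++ with a GCC builtin standing in for the cas instruction:

```cpp
#include <stdint.h>
typedef int32_t jint;  // HotSpot's jint is a 32-bit signed integer

// Exchange *dest with exchange_value and return the previous value:
// re-read and CAS until the observed value can be replaced atomically.
static jint xchg_via_cas(jint exchange_value, volatile jint* dest) {
  jint observed;
  do {
    observed = *dest;                          // ld  [O1], O2
  } while (!__sync_bool_compare_and_swap(dest, observed, exchange_value));
  return observed;                             // delay slot: mov O2, O0
}
```
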
@@ -691,7 +659,7 @@
 
   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O0
   //      dest:           O1
@@ -701,15 +669,12 @@
   //
   //     O0: the value previously stored in dest
   //
-  // Overwrites (v8): O3,O4,O5
-  //
   address generate_atomic_cmpxchg() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
     address start = __ pc();
 
     // cmpxchg(dest, compare_value, exchange_value)
-    __ cas_under_lock(O1, O2, O0,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+    __ cas(O1, O2, O0);
     __ retl(false);
     __ delayed()->nop();
 
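
With cas_under_lock gone, the 32-bit compare-and-swap stub reduces to a single cas instruction plus the return sequence. Its contract, expressed with the matching GCC builtin (a semantic sketch, not the stub itself):

```cpp
// Atomically: if (*dest == compare_value) *dest = exchange_value;
// always returns the value *dest held before the operation, so callers
// detect success by comparing the result against compare_value.
static int atomic_cmpxchg_sketch(int exchange_value, volatile int* dest,
                                 int compare_value) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
```
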
@@ -718,7 +683,7 @@
 
   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O1:O0
   //      dest:           O2
@@ -728,17 +693,12 @@
   //
   //     O1:O0: the value previously stored in dest
   //
-  // This only works on V9, on V8 we don't generate any
-  // code and just return NULL.
-  //
   // Overwrites: G1,G2,G3
   //
   address generate_atomic_cmpxchg_long() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
     address start = __ pc();
 
-    if (!VM_Version::supports_cx8())
-        return NULL;;
     __ sllx(O0, 32, O0);
     __ srl(O1, 0, O1);
     __ or3(O0,O1,O0);      // O0 holds 64-bit value from compare_value
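
Two things happen in this hunk. The supports_cx8() guard (with its stray double semicolon) disappears because every V9 CPU provides a 64-bit casx, so the stub no longer needs a NULL fallback for old hardware. The surviving context also shows how a jlong arriving as two 32-bit halves is packed into one 64-bit register before the compare-and-swap; the same sllx/srl/or3 packing in C++:

```cpp
#include <stdint.h>

// Rebuild a 64-bit value from high and low 32-bit halves, mirroring the
// stub's shift-and-or sequence.
static uint64_t pack_hi_lo(uint32_t hi, uint32_t lo) {
  return ((uint64_t)hi << 32)  // sllx: move the high half into bits 63..32
       | (uint64_t)lo;         // srl ..., 0, ...: zero-extend the low half; or3 combines
}
```
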
@@ -756,7 +716,7 @@
 
   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
   //
-  // Arguments :
+  // Arguments:
   //
   //      add_value: O0   (e.g., +1 or -1)
   //      dest:      O1
@@ -765,47 +725,22 @@
   //
   //     O0: the new value stored in dest
   //
-  // Overwrites (v9): O3
-  // Overwrites (v8): O3,O4,O5
+  // Overwrites: O3
   //
   address generate_atomic_add() {
     StubCodeMark mark(this, "StubRoutines", "atomic_add");
     address start = __ pc();
     __ BIND(_atomic_add_stub);
 
-    if (VM_Version::v9_instructions_work()) {
-      Label(retry);
-      __ BIND(retry);
-
-      __ lduw(O1, 0, O2);
-      __ add(O0, O2, O3);
-      __ cas(O1, O2, O3);
-      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
-      __ retl(false);
-      __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
-    } else {
-      const Register& lock_reg = O2;
-      const Register& lock_ptr_reg = O3;
-      const Register& value_reg = O4;
-      const Register& yield_reg = O5;
-
-      Label(retry);
-      Label(dontyield);
-
-      generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-      // got lock, do the increment
-      __ ld(O1, 0, value_reg);
-      __ add(O0, value_reg, value_reg);
-      __ st(value_reg, O1, 0);
-
-      // %%% only for RMO and PSO
-      __ membar(Assembler::StoreStore);
-
-      generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-
-      __ retl(false);
-      __ delayed()->mov(value_reg, O0);
-    }
+    Label retry;
+    __ BIND(retry);
+
+    __ lduw(O1, 0, O2);
+    __ add(O0, O2, O3);
+    __ cas(O1, O2, O3);
+    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
+    __ retl(false);
+    __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
 
     return start;
   }
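
With the V8 branch gone, generate_atomic_add() is an unconditional CAS loop implementing fetch-and-add. Note the delay-slot trick the final comment hints at: after a successful cas, O2 holds the value that was in memory, so add(O0, O2, O0) recomputes the new value on the way out. The loop in C++ terms, with a GCC builtin in place of cas:

```cpp
#include <stdint.h>
typedef int32_t jint;  // HotSpot's jint is a 32-bit signed integer

// Atomically add add_value to *dest and return the *new* value, matching
// the stub's documented result ("O0: the new value stored in dest").
static jint add_via_cas(jint add_value, volatile jint* dest) {
  jint old;
  do {
    old = *dest;                                  // lduw [O1], O2
  } while (!__sync_bool_compare_and_swap(dest, old, old + add_value));
  return old + add_value;                         // delay slot: add O0, O2, O0
}
```
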
@@ -841,7 +776,7 @@
     __ mov(G3, L3);
     __ mov(G4, L4);
     __ mov(G5, L5);
-    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+    for (i = 0; i < 64; i += 2) {
       __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
     }
 
@@ -855,7 +790,7 @@
     __ mov(L3, G3);
     __ mov(L4, G4);
     __ mov(L5, G5);
-    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+    for (i = 0; i < 64; i += 2) {
       __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
     }
 
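
These two loops save and restore the floating-point file around the body of the stub. V8 exposed only %f0 through %f31, so at most 16 doubles could be preserved; V9 extends double-precision numbering up to %f62, so the bound becomes a constant 64 with a stride of 2, one 8-byte double per iteration and 32 doubles in total. A trivial sketch that mirrors the loop bounds:

```cpp
// The registers the loops touch: %f0, %f2, ..., %f62, i.e. the 32
// double-precision registers of a V9 FPU (a V8 FPU stopped at %f30).
static int preserved_double_count() {
  int n = 0;
  for (int i = 0; i < 64; i += 2) ++n;  // same bounds as the stub's loops
  return n;                             // 32
}
```
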
@@ -3426,6 +3361,14 @@
 
     // Don't initialize the platform math functions since sparc
     // doesn't have intrinsics for these operations.
+
+    // Safefetch stubs.
+    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
+                                                       &StubRoutines::_safefetch32_fault_pc,
+                                                       &StubRoutines::_safefetch32_continuation_pc);
+    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
+                                                       &StubRoutines::_safefetchN_fault_pc,
+                                                       &StubRoutines::_safefetchN_continuation_pc);
   }
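
Finally, the generated entry, fault, and continuation PCs are published through StubRoutines so the platform signal handlers can recognize a faulting safefetch load. A sketch of the handler-side shape (helper names assumed from the shared StubRoutines interface; ucontext_set_pc stands in for the platform-specific PC setter):

```cpp
// Inside the SEGV/SIGBUS handler, before treating the fault as fatal: if the
// faulting PC is a recorded safefetch fault PC, resume at the matching
// continuation PC; errValue is already in O0, so the stub simply returns it.
static bool handle_safefetch_fault(ucontext_t* uc, address pc) {
  if (StubRoutines::is_safefetch_fault(pc)) {
    ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
    return true;   // handled; execution continues in the stub
  }
  return false;    // not a safefetch fault; fall through to normal handling
}
```
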