changeset 1890:f5c8d6e5bfee

Merge
author jcoomes
date Mon, 01 Nov 2010 10:49:14 -0700
parents ee0d26abaad3 c766bae6c14d (current diff)
children 9de67bf4244d
files src/share/vm/runtime/arguments.cpp src/share/vm/runtime/globals.hpp
diffstat 80 files changed, 6147 insertions(+), 5032 deletions(-)
--- a/.hgtags	Thu Oct 28 14:46:29 2010 -0700
+++ b/.hgtags	Mon Nov 01 10:49:14 2010 -0700
@@ -122,4 +122,6 @@
 2f25f2b8de2700a1822463b1bd3d02b5e218018f jdk7-b110
 07b042e13dde4f3479ba9ec55120fcd5e8623323 jdk7-b111
 5511edd5d719f3fc9fdd04879482026a3d2c8652 jdk7-b112
+beef35b96b81129c375d572357fb9548d9020db1 jdk7-b113
+68d6141ea19de3a9ba98ef753f0da41a61f736a0 jdk7-b114
 5511edd5d719f3fc9fdd04879482026a3d2c8652 hs20-b01
--- a/make/hotspot_version	Thu Oct 28 14:46:29 2010 -0700
+++ b/make/hotspot_version	Mon Nov 01 10:49:14 2010 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=20
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=01
+HS_BUILD_NUMBER=02
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -3094,11 +3094,10 @@
 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
                                               Register temp_reg,
                                               Label& wrong_method_type) {
-  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
   assert_different_registers(mtype_reg, mh_reg, temp_reg);
   // compare method type against that of the receiver
   RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
-  ld_ptr(mh_reg, mhtype_offset, temp_reg);
+  load_heap_oop(mh_reg, mhtype_offset, temp_reg);
   cmp(temp_reg, mtype_reg);
   br(Assembler::notEqual, false, Assembler::pn, wrong_method_type);
   delayed()->nop();
@@ -3112,16 +3111,15 @@
 void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
                                                 Register temp_reg) {
   assert_different_registers(vmslots_reg, mh_reg, temp_reg);
-  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
   // load mh.type.form.vmslots
   if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
     // hoist vmslots into every mh to avoid dependent load chain
-    ld(    Address(mh_reg,    delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)),   vmslots_reg);
+    ld(           Address(mh_reg,    delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)),   vmslots_reg);
   } else {
     Register temp2_reg = vmslots_reg;
-    ld_ptr(Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)),      temp2_reg);
-    ld_ptr(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)),        temp2_reg);
-    ld(    Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
+    load_heap_oop(Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)),      temp2_reg);
+    load_heap_oop(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)),        temp2_reg);
+    ld(           Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
   }
 }
 
@@ -3130,9 +3128,8 @@
   assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
   assert_different_registers(mh_reg, temp_reg);
 
-  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
-
   // pick out the interpreted side of the handler
+  // NOTE: vmentry is not an oop!
   ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
 
   // off we go...
@@ -4653,6 +4650,11 @@
   }
 }
 
+void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
+  if (s2.is_constant())  load_heap_oop(s1, s2.as_constant(), d);
+  else                   load_heap_oop(s1, s2.as_register(), d);
+}
+
 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
   if (UseCompressedOops) {
     assert(s1 != d && s2 != d, "not enough registers");
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -825,6 +825,12 @@
   // test if -4096 <= x <= 4095
   static bool is_simm13(int x) { return is_simm(x, 13); }
 
+  // test if label is in simm16 range in words (wdisp16).
+  bool is_in_wdisp16_range(Label& L) {
+    intptr_t d = intptr_t(pc()) - intptr_t(target(L));
+    return is_simm(d, 18);
+  }
+
   enum ASIs { // page 72, v9
     ASI_PRIMARY        = 0x80,
     ASI_PRIMARY_LITTLE = 0x88
@@ -2103,6 +2109,7 @@
   void load_heap_oop(const Address& s, Register d);
   void load_heap_oop(Register s1, Register s2, Register d);
   void load_heap_oop(Register s1, int simm13a, Register d);
+  void load_heap_oop(Register s1, RegisterOrConstant s2, Register d);
   void store_heap_oop(Register d, Register s1, Register s2);
   void store_heap_oop(Register d, Register s1, int simm13a);
   void store_heap_oop(Register d, const Address& a, int offset = 0);
@@ -2225,7 +2232,7 @@
   void stop(const char* msg);                          // prints msg, dumps registers and stops execution
   void warn(const char* msg);                          // prints msg, but don't stop
   void untested(const char* what = "");
-  void unimplemented(const char* what = "")              { char* b = new char[1024];  sprintf(b, "unimplemented: %s", what);  stop(b); }
+  void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, 1024, "unimplemented: %s", what);  stop(b); }
   void should_not_reach_here()                   { stop("should not reach here"); }
   void print_CPU_state();
 
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -425,8 +425,13 @@
   Register pre_val_reg = pre_val()->as_register();
 
   ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
-  __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
-                    pre_val_reg, _continuation);
+  if (__ is_in_wdisp16_range(_continuation)) {
+    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
+                      pre_val_reg, _continuation);
+  } else {
+    __ cmp(pre_val_reg, G0);
+    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
+  }
   __ delayed()->nop();
 
   __ call(Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id));
@@ -452,8 +457,13 @@
   assert(new_val()->is_register(), "Precondition.");
   Register addr_reg = addr()->as_pointer_register();
   Register new_val_reg = new_val()->as_register();
-  __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
-                    new_val_reg, _continuation);
+  if (__ is_in_wdisp16_range(_continuation)) {
+    __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
+                      new_val_reg, _continuation);
+  } else {
+    __ cmp(new_val_reg, G0);
+    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
+  }
   __ delayed()->nop();
 
   __ call(Runtime1::entry_for(Runtime1::Runtime1::g1_post_barrier_slow_id));
--- a/src/cpu/sparc/vm/c1_LinearScan_sparc.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/sparc/vm/c1_LinearScan_sparc.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -64,7 +64,7 @@
     _first_reg = pd_first_callee_saved_reg;
     _last_reg = pd_last_callee_saved_reg;
     return true;
-  } else if (cur->type() == T_INT || cur->type() == T_LONG || cur->type() == T_OBJECT) {
+  } else if (cur->type() == T_INT || cur->type() == T_LONG || cur->type() == T_OBJECT || cur->type() == T_ADDRESS) {
     _first_reg = pd_first_cpu_reg;
     _last_reg = pd_last_allocatable_cpu_reg;
     return true;
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -62,3 +62,5 @@
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -27,6 +27,14 @@
 
 #define __ _masm->
 
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                 address interpreted_entry) {
   // Just before the actual machine code entry point, allocate space
@@ -90,8 +98,8 @@
   }
 
   // given the MethodType, find out where the MH argument is buried
-  __ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)),        O0_argslot);
-  __ ldsw(  Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
+  __ load_heap_oop(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)),        O0_argslot);
+  __ ldsw(         Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
   __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
 
   __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
@@ -105,6 +113,7 @@
 static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
   // Verify that argslot lies within (Gargs, FP].
   Label L_ok, L_bad;
+  BLOCK_COMMENT("{ verify_argslot");
 #ifdef _LP64
   __ add(FP, STACK_BIAS, temp_reg);
   __ cmp(argslot_reg, temp_reg);
@@ -119,6 +128,7 @@
   __ bind(L_bad);
   __ stop(error_message);
   __ bind(L_ok);
+  BLOCK_COMMENT("} verify_argslot");
 }
 #endif
 
@@ -175,6 +185,7 @@
   //   for (temp = sp + size; temp < argslot; temp++)
   //     temp[-size] = temp[0]
   //   argslot -= size;
+  BLOCK_COMMENT("insert_arg_slots {");
   RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
 
   // Keep the stack pointer 2*wordSize aligned.
@@ -187,7 +198,7 @@
 
   {
     Label loop;
-    __ bind(loop);
+    __ BIND(loop);
     // pull one word down each time through the loop
     __ ld_ptr(Address(temp_reg, 0), temp2_reg);
     __ st_ptr(temp2_reg, Address(temp_reg, offset));
@@ -199,6 +210,7 @@
 
   // Now move the argslot down, to point to the opened-up space.
   __ add(argslot_reg, offset, argslot_reg);
+  BLOCK_COMMENT("} insert_arg_slots");
 }
 
 
@@ -235,6 +247,7 @@
   }
 #endif // ASSERT
 
+  BLOCK_COMMENT("remove_arg_slots {");
   // Pull up everything shallower than argslot.
   // Then remove the excess space on the stack.
   // The stacked return address gets pulled up with everything else.
@@ -246,7 +259,7 @@
   __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
   {
     Label loop;
-    __ bind(loop);
+    __ BIND(loop);
     // pull one word up each time through the loop
     __ ld_ptr(Address(temp_reg, 0), temp2_reg);
     __ st_ptr(temp2_reg, Address(temp_reg, offset));
@@ -265,29 +278,35 @@
   const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
   RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
   __ add(SP, masked_offset, SP);
+  BLOCK_COMMENT("} remove_arg_slots");
 }
 
 
 #ifndef PRODUCT
 extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
-                              oop mh) {
-#if 0
-                              intptr_t* entry_sp,
-                              intptr_t* saved_sp,
-                              intptr_t* saved_bp) {
-  // called as a leaf from native code: do not block the JVM!
-  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
-  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
-  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
-         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
-  if (last_sp != saved_sp)
-    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
-#endif
-
+                              oopDesc* mh) {
   printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
   print_method_handle(mh);
 }
+void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
+  if (!TraceMethodHandles)  return;
+  BLOCK_COMMENT("trace_method_handle {");
+  // save: Gargs, O5_savedSP
+  __ save_frame(16);
+  __ set((intptr_t) adaptername, O0);
+  __ mov(G3_method_handle, O1);
+  __ mov(G3_method_handle, L3);
+  __ mov(Gargs, L4);
+  __ mov(G5_method_type, L5);
+  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
+
+  __ mov(L3, G3_method_handle);
+  __ mov(L4, Gargs);
+  __ mov(L5, G5_method_type);
+  __ restore();
+  BLOCK_COMMENT("} trace_method_handle");
+}
 #endif // PRODUCT
 
 // which conversion op types are implemented here?
@@ -348,18 +367,8 @@
   }
 
   address interp_entry = __ pc();
-  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
 
-#ifndef PRODUCT
-  if (TraceMethodHandles) {
-    // save: Gargs, O5_savedSP
-    __ save(SP, -16*wordSize, SP);
-    __ set((intptr_t) entry_name(ek), O0);
-    __ mov(G3_method_handle, O1);
-    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
-    __ restore(SP, 16*wordSize, SP);
-  }
-#endif // PRODUCT
+  trace_method_handle(_masm, entry_name(ek));
 
   switch ((int) ek) {
   case _raise_exception:
@@ -413,7 +422,7 @@
   case _invokestatic_mh:
   case _invokespecial_mh:
     {
-      __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
+      __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
       __ verify_oop(G5_method);
       // Same as TemplateTable::invokestatic or invokespecial,
       // minus the CP setup and profiling:
@@ -468,7 +477,7 @@
       // minus the CP setup and profiling:
       __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
       Register O1_intf  = O1_scratch;
-      __ ld_ptr(G3_mh_vmtarget, O1_intf);
+      __ load_heap_oop(G3_mh_vmtarget, O1_intf);
       __ ldsw(G3_dmh_vmindex, G5_index);
       __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
       __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
@@ -523,7 +532,7 @@
       insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
 
       // Store bound argument into the new stack slot:
-      __ ld_ptr(G3_bmh_argument, O1_scratch);
+      __ load_heap_oop(G3_bmh_argument, O1_scratch);
       if (arg_type == T_OBJECT) {
         __ st_ptr(O1_scratch, Address(O0_argslot, 0));
       } else {
@@ -541,12 +550,12 @@
       }
 
       if (direct_to_method) {
-        __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
+        __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
         __ verify_oop(G5_method);
         __ jump_indirect_to(G5_method_fie, O1_scratch);
         __ delayed()->nop();
       } else {
-        __ ld_ptr(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
+        __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
         __ verify_oop(G3_method_handle);
         __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
       }
@@ -556,7 +565,7 @@
   case _adapter_retype_only:
   case _adapter_retype_raw:
     // Immediately jump to the next MH layer:
-    __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+    __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
     __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     // This is OK when all parameter types widen.
     // It is also OK when a return type narrows.
@@ -572,8 +581,8 @@
       Address vmarg = __ argument_address(O0_argslot);
 
       // What class are we casting to?
-      __ ld_ptr(G3_amh_argument, G5_klass);  // This is a Class object!
-      __ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
+      __ load_heap_oop(G3_amh_argument, G5_klass);  // This is a Class object!
+      __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
 
       Label done;
       __ ld_ptr(vmarg, O1_scratch);
@@ -590,14 +599,14 @@
 
       // If we get here, the type check failed!
       __ ldsw(G3_amh_vmargslot, O0_argslot);  // reload argslot field
-      __ ld_ptr(G3_amh_argument, O3_scratch);  // required class
+      __ load_heap_oop(G3_amh_argument, O3_scratch);  // required class
       __ ld_ptr(vmarg, O2_scratch);  // bad object
       __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
       __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?
 
       __ bind(done);
       // Get the new MH:
-      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     }
     break;
@@ -676,7 +685,7 @@
       __ st(O1_scratch, vmarg);
 
       // Get the new MH:
-      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     }
     break;
@@ -721,7 +730,7 @@
         ShouldNotReachHere();
       }
 
-      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     }
     break;
@@ -851,7 +860,7 @@
         }
       }
 
-      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     }
     break;
@@ -895,7 +904,7 @@
       __ brx(Assembler::less, false, Assembler::pt, loop);
       __ delayed()->nop();  // FILLME
 
-      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     }
     break;
@@ -913,7 +922,7 @@
 
       remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
 
-      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     }
     break;
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -2586,6 +2586,8 @@
     __ restore();
 #endif
 
+    assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
+
 #ifdef ASSERT
     // caller guarantees that the arrays really are different
     // otherwise, we would have to make conjoint checks
@@ -2600,8 +2602,6 @@
     }
 #endif //ASSERT
 
-    assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
-
     checkcast_copy_entry = __ pc();
     // caller can pass a 64-bit byte count here (from generic stub)
     BLOCK_COMMENT("Entry:");
--- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -43,7 +43,7 @@
 
 // MethodHandles adapters
 enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 12000
+  method_handles_adapters_code_size = 15000
 };
 
 class Sparc {
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -3273,7 +3273,7 @@
   __ sll(Rret, LogBytesPerWord, Rret);
   __ ld_ptr(Rtemp, Rret, Rret);  // get return address
 
-  __ ld_ptr(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
+  __ load_heap_oop(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
   __ null_check(G3_method_handle);
 
   // Adjust Rret first so Llast_SP can be same as Rret
--- a/src/cpu/x86/vm/assembler_x86.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -7709,9 +7709,14 @@
 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
                                               Register temp_reg,
                                               Label& wrong_method_type) {
-  if (UseCompressedOops)  unimplemented();  // field accesses must decode
+  Address type_addr(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg));
   // compare method type against that of the receiver
-  cmpptr(mtype_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));
+  if (UseCompressedOops) {
+    load_heap_oop(temp_reg, type_addr);
+    cmpptr(mtype_reg, temp_reg);
+  } else {
+    cmpptr(mtype_reg, type_addr);
+  }
   jcc(Assembler::notEqual, wrong_method_type);
 }
 
@@ -7723,15 +7728,14 @@
 void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
                                                 Register temp_reg) {
   assert_different_registers(vmslots_reg, mh_reg, temp_reg);
-  if (UseCompressedOops)  unimplemented();  // field accesses must decode
   // load mh.type.form.vmslots
   if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
     // hoist vmslots into every mh to avoid dependent load chain
     movl(vmslots_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)));
   } else {
     Register temp2_reg = vmslots_reg;
-    movptr(temp2_reg, Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));
-    movptr(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)));
+    load_heap_oop(temp2_reg, Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));
+    load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)));
     movl(vmslots_reg, Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)));
   }
 }
@@ -7745,9 +7749,8 @@
   assert(mh_reg == rcx, "caller must put MH object in rcx");
   assert_different_registers(mh_reg, temp_reg);
 
-  if (UseCompressedOops)  unimplemented();  // field accesses must decode
-
   // pick out the interpreted side of the handler
+  // NOTE: vmentry is not an oop!
   movptr(temp_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg)));
 
   // off we go...
@@ -8238,6 +8241,40 @@
     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
 }
 
+void MacroAssembler::load_heap_oop(Register dst, Address src) {
+#ifdef _LP64
+  if (UseCompressedOops) {
+    movl(dst, src);
+    decode_heap_oop(dst);
+  } else
+#endif
+    movptr(dst, src);
+}
+
+void MacroAssembler::store_heap_oop(Address dst, Register src) {
+#ifdef _LP64
+  if (UseCompressedOops) {
+    assert(!dst.uses(src), "not enough registers");
+    encode_heap_oop(src);
+    movl(dst, src);
+  } else
+#endif
+    movptr(dst, src);
+}
+
+// Used for storing NULLs.
+void MacroAssembler::store_heap_oop_null(Address dst) {
+#ifdef _LP64
+  if (UseCompressedOops) {
+    movl(dst, (int32_t)NULL_WORD);
+  } else {
+    movslq(dst, (int32_t)NULL_WORD);
+  }
+#else
+  movl(dst, (int32_t)NULL_WORD);
+#endif
+}
+
 #ifdef _LP64
 void MacroAssembler::store_klass_gap(Register dst, Register src) {
   if (UseCompressedOops) {
@@ -8246,34 +8283,6 @@
   }
 }
 
-void MacroAssembler::load_heap_oop(Register dst, Address src) {
-  if (UseCompressedOops) {
-    movl(dst, src);
-    decode_heap_oop(dst);
-  } else {
-    movq(dst, src);
-  }
-}
-
-void MacroAssembler::store_heap_oop(Address dst, Register src) {
-  if (UseCompressedOops) {
-    assert(!dst.uses(src), "not enough registers");
-    encode_heap_oop(src);
-    movl(dst, src);
-  } else {
-    movq(dst, src);
-  }
-}
-
-// Used for storing NULLs.
-void MacroAssembler::store_heap_oop_null(Address dst) {
-  if (UseCompressedOops) {
-    movl(dst, (int32_t)NULL_WORD);
-  } else {
-    movslq(dst, (int32_t)NULL_WORD);
-  }
-}
-
 #ifdef ASSERT
 void MacroAssembler::verify_heapbase(const char* msg) {
   assert (UseCompressedOops, "should be compressed");
--- a/src/cpu/x86/vm/assembler_x86.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1682,24 +1682,24 @@
   void load_klass(Register dst, Register src);
   void store_klass(Register dst, Register src);
 
+  void load_heap_oop(Register dst, Address src);
+  void store_heap_oop(Address dst, Register src);
+
+  // Used for storing NULL. All other oop constants should be
+  // stored using routines that take a jobject.
+  void store_heap_oop_null(Address dst);
+
   void load_prototype_header(Register dst, Register src);
 
 #ifdef _LP64
   void store_klass_gap(Register dst, Register src);
 
-  void load_heap_oop(Register dst, Address src);
-  void store_heap_oop(Address dst, Register src);
-
   // This dummy is to prevent a call to store_heap_oop from
   // converting a zero (like NULL) into a Register by giving
   // the compiler two choices it can't resolve
 
   void store_heap_oop(Address dst, void* dummy);
 
-  // Used for storing NULL. All other oop constants should be
-  // stored using routines that take a jobject.
-  void store_heap_oop_null(Address dst);
-
   void encode_heap_oop(Register r);
   void decode_heap_oop(Register r);
   void encode_heap_oop_not_null(Register r);
@@ -1927,7 +1927,7 @@
 
   void untested()                                { stop("untested"); }
 
-  void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, sizeof(b), "unimplemented: %s", what);  stop(b); }
+  void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, 1024, "unimplemented: %s", what);  stop(b); }
 
   void should_not_reach_here()                   { stop("should not reach here"); }
 
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -499,7 +499,7 @@
   Register new_val_reg = new_val()->as_register();
   __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
   __ jcc(Assembler::equal, _continuation);
-  ce->store_parameter(addr()->as_register(), 0);
+  ce->store_parameter(addr()->as_pointer_register(), 0);
   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
   __ jmp(_continuation);
 }
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1941,8 +1941,6 @@
       __ cmpxchgptr(newval, Address(addr, 0));
     } else if (op->code() == lir_cas_int) {
       __ cmpxchgl(newval, Address(addr, 0));
-    } else {
-      LP64_ONLY(__ cmpxchgq(newval, Address(addr, 0)));
     }
 #ifdef _LP64
   } else if (op->code() == lir_cas_long) {
--- a/src/cpu/x86/vm/globals_x86.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/x86/vm/globals_x86.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -63,3 +63,5 @@
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -123,11 +123,9 @@
   }
 
   // given the MethodType, find out where the MH argument is buried
-  __ movptr(rdx_temp, Address(rax_mtype,
-                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
+  __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
   Register rdx_vmslots = rdx_temp;
-  __ movl(rdx_vmslots, Address(rdx_temp,
-                               __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
+  __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
   __ movptr(rcx_recv, __ argument_address(rdx_vmslots));
 
   trace_method_handle(_masm, "invokeExact");
@@ -154,20 +152,18 @@
                    rcx_argslot, rbx_temp, rdx_temp);
 
   // load up an adapter from the calling type (Java weaves this)
-  __ movptr(rdx_temp, Address(rax_mtype,
-                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
+  __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
   Register rdx_adapter = rdx_temp;
-  // movptr(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes()));
+  // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes()));
   // deal with old JDK versions:
-  __ lea(rdi_temp, Address(rdx_temp,
-                           __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
+  __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
   __ cmpptr(rdi_temp, rdx_temp);
   Label sorry_no_invoke_generic;
-  __ jccb(Assembler::below, sorry_no_invoke_generic);
+  __ jcc(Assembler::below, sorry_no_invoke_generic);
 
-  __ movptr(rdx_adapter, Address(rdi_temp, 0));
+  __ load_heap_oop(rdx_adapter, Address(rdi_temp, 0));
   __ testptr(rdx_adapter, rdx_adapter);
-  __ jccb(Assembler::zero, sorry_no_invoke_generic);
+  __ jcc(Assembler::zero, sorry_no_invoke_generic);
   __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter);
   // As a trusted first argument, pass the type being called, so the adapter knows
   // the actual types of the arguments and return values.
@@ -431,7 +427,6 @@
   }
 
   address interp_entry = __ pc();
-  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
 
   trace_method_handle(_masm, entry_name(ek));
 
@@ -489,7 +484,7 @@
   case _invokespecial_mh:
     {
       Register rbx_method = rbx_temp;
-      __ movptr(rbx_method, rcx_mh_vmtarget); // target is a methodOop
+      __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop
       __ verify_oop(rbx_method);
       // same as TemplateTable::invokestatic or invokespecial,
       // minus the CP setup and profiling:
@@ -546,8 +541,8 @@
       __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
       Register rdx_intf  = rdx_temp;
       Register rbx_index = rbx_temp;
-      __ movptr(rdx_intf,  rcx_mh_vmtarget);
-      __ movl(rbx_index,   rcx_dmh_vmindex);
+      __ load_heap_oop(rdx_intf, rcx_mh_vmtarget);
+      __ movl(rbx_index, rcx_dmh_vmindex);
       __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
       __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
 
@@ -602,7 +597,7 @@
                        rax_argslot, rbx_temp, rdx_temp);
 
       // store bound argument into the new stack slot:
-      __ movptr(rbx_temp, rcx_bmh_argument);
+      __ load_heap_oop(rbx_temp, rcx_bmh_argument);
       Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
       if (arg_type == T_OBJECT) {
         __ movptr(Address(rax_argslot, 0), rbx_temp);
@@ -620,11 +615,11 @@
 
       if (direct_to_method) {
         Register rbx_method = rbx_temp;
-        __ movptr(rbx_method, rcx_mh_vmtarget);
+        __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
         __ verify_oop(rbx_method);
         __ jmp(rbx_method_fie);
       } else {
-        __ movptr(rcx_recv, rcx_mh_vmtarget);
+        __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
         __ verify_oop(rcx_recv);
         __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
       }
@@ -634,7 +629,7 @@
   case _adapter_retype_only:
   case _adapter_retype_raw:
     // immediately jump to the next MH layer:
-    __ movptr(rcx_recv, rcx_mh_vmtarget);
+    __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
     __ verify_oop(rcx_recv);
     __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     // This is OK when all parameter types widen.
@@ -651,13 +646,13 @@
       vmarg = __ argument_address(rax_argslot);
 
       // What class are we casting to?
-      __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
-      __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
+      __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
+      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
 
       Label done;
       __ movptr(rdx_temp, vmarg);
       __ testptr(rdx_temp, rdx_temp);
-      __ jccb(Assembler::zero, done);         // no cast if null
+      __ jcc(Assembler::zero, done);         // no cast if null
       __ load_klass(rdx_temp, rdx_temp);
 
       // live at this point:
@@ -672,14 +667,15 @@
       __ movl(rax_argslot, rcx_amh_vmargslot);  // reload argslot field
       __ movptr(rdx_temp, vmarg);
 
-      __ pushptr(rcx_amh_argument); // required class
-      __ push(rdx_temp);            // bad object
-      __ push((int)Bytecodes::_checkcast);  // who is complaining?
+      __ load_heap_oop(rbx_klass, rcx_amh_argument); // required class
+      __ push(rbx_klass);
+      __ push(rdx_temp);                             // bad object
+      __ push((int)Bytecodes::_checkcast);           // who is complaining?
       __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
 
       __ bind(done);
       // get the new MH:
-      __ movptr(rcx_recv, rcx_mh_vmtarget);
+      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     }
     break;
@@ -741,7 +737,7 @@
       assert(CONV_VMINFO_SHIFT == 0, "preshifted");
 
       // get the new MH:
-      __ movptr(rcx_recv, rcx_mh_vmtarget);
+      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       // (now we are done with the old MH)
 
       // original 32-bit vmdata word must be of this form:
@@ -816,7 +812,7 @@
         ShouldNotReachHere();
       }
 
-      __ movptr(rcx_recv, rcx_mh_vmtarget);
+      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     }
     break;
@@ -858,7 +854,7 @@
                          rax_argslot, rbx_temp, rdx_temp);
       }
 
-      __ movptr(rcx_recv, rcx_mh_vmtarget);
+      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     }
     break;
@@ -969,7 +965,7 @@
         }
       }
 
-      __ movptr(rcx_recv, rcx_mh_vmtarget);
+      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     }
     break;
@@ -1029,7 +1025,7 @@
 
       __ pop(rdi);              // restore temp
 
-      __ movptr(rcx_recv, rcx_mh_vmtarget);
+      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     }
     break;
@@ -1052,7 +1048,7 @@
 
       __ pop(rdi);              // restore temp
 
-      __ movptr(rcx_recv, rcx_mh_vmtarget);
+      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     }
     break;
@@ -1103,8 +1099,8 @@
 
       // Check the array type.
       Register rbx_klass = rbx_temp;
-      __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
-      __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
+      __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
+      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
 
       Label ok_array_klass, bad_array_klass, bad_array_length;
       __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass);
@@ -1186,7 +1182,7 @@
 
       // Arguments are spread.  Move to next method handle.
       UNPUSH_RSI_RDI;
-      __ movptr(rcx_recv, rcx_mh_vmtarget);
+      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
       __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
 
       __ bind(bad_array_klass);
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -35,7 +35,7 @@
 
 // MethodHandles adapters
 enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 26000
+  method_handles_adapters_code_size = 40000
 };
 
 class x86 {
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -3111,19 +3111,22 @@
 
   // rax: CallSite object (f1)
   // rbx: unused (f2)
+  // rcx: receiver address
   // rdx: flags (unused)
 
+  Register rax_callsite      = rax;
+  Register rcx_method_handle = rcx;
+
   if (ProfileInterpreter) {
-    Label L;
     // %%% should make a type profile for any invokedynamic that takes a ref argument
     // profile this call
     __ profile_call(rsi);
   }
 
-  __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
-  __ null_check(rcx);
+  __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+  __ null_check(rcx_method_handle);
   __ prepare_to_jump_from_interpreted();
-  __ jump_to_method_handle_entry(rcx, rdx);
+  __ jump_to_method_handle_entry(rcx_method_handle, rdx);
 }
 
 //----------------------------------------------------------------------------------------------------
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -3120,17 +3120,19 @@
   // rcx: receiver address
   // rdx: flags (unused)
 
+  Register rax_callsite      = rax;
+  Register rcx_method_handle = rcx;
+
   if (ProfileInterpreter) {
-    Label L;
     // %%% should make a type profile for any invokedynamic that takes a ref argument
     // profile this call
     __ profile_call(r13);
   }
 
-  __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
-  __ null_check(rcx);
+  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+  __ null_check(rcx_method_handle);
   __ prepare_to_jump_from_interpreted();
-  __ jump_to_method_handle_entry(rcx, rdx);
+  __ jump_to_method_handle_entry(rcx_method_handle, rdx);
 }
 
 
--- a/src/cpu/zero/vm/globals_zero.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/zero/vm/globals_zero.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -45,3 +45,5 @@
 
 define_pd_global(bool,  RewriteBytecodes,     true);
 define_pd_global(bool,  RewriteFrequentPairs, true);
+
+define_pd_global(bool,  UseMembar,            false);
--- a/src/cpu/zero/vm/interpreterRT_zero.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/cpu/zero/vm/interpreterRT_zero.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -92,15 +92,15 @@
 
  public:
   SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer)
-    : SignatureHandlerGeneratorBase(method, (ffi_cif *) buffer->code_end()),
+    : SignatureHandlerGeneratorBase(method, (ffi_cif *) buffer->insts_end()),
       _cb(buffer) {
-    _cb->set_code_end((address) (cif() + 1));
+    _cb->set_insts_end((address) (cif() + 1));
   }
 
  private:
   void push(intptr_t value) {
-    intptr_t *dst = (intptr_t *) _cb->code_end();
-    _cb->set_code_end((address) (dst + 1));
+    intptr_t *dst = (intptr_t *) _cb->insts_end();
+    _cb->set_insts_end((address) (dst + 1));
     *dst = value;
   }
 };
--- a/src/os/linux/vm/attachListener_linux.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/os/linux/vm/attachListener_linux.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -176,10 +176,10 @@
 
   int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
                    os::get_temp_directory(), os::current_process_id());
-  if (n <= (int)UNIX_PATH_MAX) {
+  if (n < (int)UNIX_PATH_MAX) {
     n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
   }
-  if (n > (int)UNIX_PATH_MAX) {
+  if (n >= (int)UNIX_PATH_MAX) {
     return -1;
   }
 
--- a/src/os/linux/vm/objectMonitor_linux.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,24 +0,0 @@
-
-/*
- * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/src/os/linux/vm/objectMonitor_linux.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
- private:
--- a/src/os/linux/vm/objectMonitor_linux.inline.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/src/os/linux/vm/os_linux.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -827,8 +827,10 @@
 
       switch (thr_type) {
       case os::java_thread:
-        // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
-        if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
+        // Java threads use ThreadStackSize which default value can be
+        // changed with the flag -Xss
+        assert (JavaThread::stack_size_at_create() > 0, "this should be set");
+        stack_size = JavaThread::stack_size_at_create();
         break;
       case os::compiler_thread:
         if (CompilerThreadStackSize > 0) {
@@ -3922,12 +3924,21 @@
   Linux::signal_sets_init();
   Linux::install_signal_handlers();
 
+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size.  Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
+
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < Linux::min_stack_allowed) {
+      threadStackSizeInBytes < os::Linux::min_stack_allowed) {
         tty->print_cr("\nThe stack size specified is too small, "
                       "Specify at least %dk",
-                      Linux::min_stack_allowed / K);
+                      os::Linux::min_stack_allowed/ K);
         return JNI_ERR;
   }
 
@@ -4839,7 +4850,7 @@
 
   // Next, demultiplex/decode time arguments
   timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
     return;
   }
   if (time > 0) {
--- a/src/os/solaris/vm/objectMonitor_solaris.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/src/os/solaris/vm/objectMonitor_solaris.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
- private:
--- a/src/os/solaris/vm/objectMonitor_solaris.inline.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/src/os/solaris/vm/os_solaris.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4878,18 +4878,17 @@
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
   // size.  Add a page for compiler2 recursion in main thread.
-  // Add in BytesPerWord times page size to account for VM stack during
+  // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
-  guarantee((Solaris::min_stack_allowed >=
-    (StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
-     COMPILER2_PRESENT(+1)) * page_size),
-    "need to increase Solaris::min_stack_allowed on this platform");
+  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-    threadStackSizeInBytes < Solaris::min_stack_allowed) {
+    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
     tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
-                  Solaris::min_stack_allowed/K);
+                  os::Solaris::min_stack_allowed/K);
     return JNI_ERR;
   }
 
@@ -5837,7 +5836,7 @@
 
   // First, demultiplex/decode time arguments
   timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
     return;
   }
   if (time > 0) {
--- a/src/os/windows/vm/objectMonitor_windows.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "incls/_precompiled.incl"
--- a/src/os/windows/vm/objectMonitor_windows.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
- private:
--- a/src/os/windows/vm/objectMonitor_windows.inline.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
--- a/src/os/windows/vm/os_windows.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -3311,7 +3311,6 @@
   }
 }
 
-
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
   // Allocate a single page and mark it as readable for safepoint polling
@@ -3390,6 +3389,21 @@
     actual_reserve_size = default_reserve_size;
   }
 
+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size.  Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  size_t min_stack_allowed =
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
+  if (actual_reserve_size < min_stack_allowed) {
+    tty->print_cr("\nThe stack size specified is too small, "
+                  "Specify at least %dk",
+                  min_stack_allowed / K);
+    return JNI_ERR;
+  }
+
   JavaThread::set_stack_size_at_create(stack_commit_size);
 
   // Calculate theoretical max. size of Threads to guard against artificial
@@ -3992,7 +4006,7 @@
   if (time < 0) { // don't wait
     return;
   }
-  else if (time == 0) {
+  else if (time == 0 && !isAbsolute) {
     time = INFINITE;
   }
   else if  (isAbsolute) {
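For readers following the new minimum-stack check added to os::init_2 above, here is a minimal standalone sketch of the same arithmetic. The page counts, page size, and requested stack size are assumptions; HotSpot takes them from StackYellowPages/StackRedPages/StackShadowPages, os::vm_page_size(), and the parsed -Xss argument.

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed values; HotSpot reads these from VM globals and the OS.
  const size_t page_size      = 4096;
  const size_t yellow_pages   = 2, red_pages = 1, shadow_pages = 6;
  const size_t bytes_per_word = sizeof(void*);
  const bool   compiler2      = true;   // one extra page for C2 recursion

  // Same shape as the check above: guard-zone pages, plus 2*BytesPerWord
  // pages of slack for class initialization, all times the page size.
  const size_t min_stack_allowed =
      (yellow_pages + red_pages + shadow_pages +
       2 * bytes_per_word + (compiler2 ? 1 : 0)) * page_size;

  const size_t requested = 32 * 1024;   // e.g. a hypothetical -Xss32k
  if (requested < min_stack_allowed) {
    std::printf("The stack size specified is too small. "
                "Specify at least %zuk\n", min_stack_allowed / 1024);
    return 1;
  }
  return 0;
}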
--- a/src/share/vm/asm/codeBuffer.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/asm/codeBuffer.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -168,8 +168,8 @@
   bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
   bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }
 
-  void    set_end(address pc)       { assert(allocates2(pc),""); _end = pc; }
-  void    set_mark(address pc)      { assert(contains2(pc),"not in codeBuffer");
+  void    set_end(address pc)       { assert(allocates2(pc), err_msg("not in CodeBuffer memory: " PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT, _start, pc, _limit)); _end = pc; }
+  void    set_mark(address pc)      { assert(contains2(pc), "not in codeBuffer");
                                       _mark = pc; }
   void    set_mark_off(int offset)  { assert(contains2(offset+_start),"not in codeBuffer");
                                       _mark = offset + _start; }
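The codeBuffer.hpp change above only swaps a bare assert string for a formatted err_msg. A standalone sketch of that pattern follows, using a stand-in macro rather than HotSpot's err_msg/PTR_FORMAT, and nothing beyond the C standard library.

#include <cassert>
#include <cstdio>

// Stand-in for the err_msg() idiom: format the offending values before the
// assert so a failure report shows them, instead of a fixed string.
#define ASSERT_MSG(cond, ...)                              \
  do {                                                     \
    if (!(cond)) {                                         \
      std::fprintf(stderr, "assert(%s) failed: ", #cond);  \
      std::fprintf(stderr, __VA_ARGS__);                   \
      std::fprintf(stderr, "\n");                          \
    }                                                      \
    assert(cond);                                          \
  } while (0)

int main() {
  const char* start = reinterpret_cast<const char*>(0x1000);
  const char* limit = reinterpret_cast<const char*>(0x2000);
  const char* pc    = reinterpret_cast<const char*>(0x1800);  // in range, so this passes
  ASSERT_MSG(start <= pc && pc <= limit,
             "not in CodeBuffer memory: %p <= %p <= %p",
             static_cast<const void*>(start),
             static_cast<const void*>(pc),
             static_cast<const void*>(limit));
  return 0;
}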
--- a/src/share/vm/ci/ciInstanceKlass.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/ci/ciInstanceKlass.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -471,7 +471,7 @@
     ciField* field = fields->at(i);
     int offset = field->offset_in_bytes();
     int size   = (field->_type == NULL) ? heapOopSize : field->size_in_bytes();
-    assert(last_offset <= offset, "no field overlap");
+    assert(last_offset <= offset, err_msg("no field overlap: %d <= %d", last_offset, offset));
     if (last_offset > (int)sizeof(oopDesc))
       assert((offset - last_offset) < BytesPerLong, "no big holes");
     // Note:  Two consecutive T_BYTE fields will be separated by wordSize-1
--- a/src/share/vm/ci/ciTypeFlow.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/ci/ciTypeFlow.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1945,7 +1945,7 @@
   _has_irreducible_entry = false;
   _osr_bci = osr_bci;
   _failure_reason = NULL;
-  assert(start_bci() >= 0 && start_bci() < code_size() , "correct osr_bci argument");
+  assert(0 <= start_bci() && start_bci() < code_size() , err_msg("correct osr_bci argument: 0 <= %d < %d", start_bci(), code_size()));
   _work_list = NULL;
 
   _ciblock_count = _methodBlocks->num_blocks();
--- a/src/share/vm/classfile/classFileParser.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -2702,13 +2702,15 @@
       // Adjust the field type from byte to an unmanaged pointer.
       assert(fac_ptr->nonstatic_byte_count > 0, "");
       fac_ptr->nonstatic_byte_count -= 1;
-      (*fields_ptr)->ushort_at_put(i + instanceKlass::signature_index_offset,
-                                   word_sig_index);
-      fac_ptr->nonstatic_word_count += 1;
+
+      (*fields_ptr)->ushort_at_put(i + instanceKlass::signature_index_offset, word_sig_index);
+      assert(wordSize == longSize || wordSize == jintSize, "ILP32 or LP64");
+      if (wordSize == longSize)  fac_ptr->nonstatic_double_count += 1;
+      else                       fac_ptr->nonstatic_word_count   += 1;
 
       FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i + instanceKlass::low_offset);
       assert(atype == NONSTATIC_BYTE, "");
-      FieldAllocationType new_atype = NONSTATIC_WORD;
+      FieldAllocationType new_atype = (wordSize == longSize) ? NONSTATIC_DOUBLE : NONSTATIC_WORD;
       (*fields_ptr)->ushort_at_put(i + instanceKlass::low_offset, new_atype);
 
       found_vmentry = true;
@@ -4307,20 +4309,21 @@
 }
 
 
-// Unqualified names may not contain the characters '.', ';', or '/'.
-// Method names also may not contain the characters '<' or '>', unless <init> or <clinit>.
-// Note that method names may not be <init> or <clinit> in this method.
-// Because these names have been checked as special cases before calling this method
-// in verify_legal_method_name.
-bool ClassFileParser::verify_unqualified_name(char* name, unsigned int length, int type) {
+// Unqualified names may not contain the characters '.', ';', '[', or '/'.
+// Method names also may not contain the characters '<' or '>', unless <init>
+// or <clinit>.  Note that method names may not be <init> or <clinit> in this
+// method, because these names have been checked as special cases before
+// calling this method in verify_legal_method_name.
+bool ClassFileParser::verify_unqualified_name(
+    char* name, unsigned int length, int type) {
   jchar ch;
 
   for (char* p = name; p != name + length; ) {
     ch = *p;
     if (ch < 128) {
       p++;
-      if (ch == '.' || ch == ';') {
-        return false;   // do not permit '.' or ';'
+      if (ch == '.' || ch == ';' || ch == '[' ) {
+        return false;   // do not permit '.', ';', or '['
       }
       if (type != LegalClass && ch == '/') {
         return false;   // do not permit '/' unless it's class name
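As a quick illustration of the tightened rule above (which now rejects '[' as well), here is a simplified, ASCII-only sketch; the real verify_unqualified_name also walks multi-byte UTF-8 characters and distinguishes field, method, and class names.

#include <cstdio>

// Simplified check: unqualified names may not contain '.', ';', or '[',
// and only class names may contain '/'.
static bool verify_unqualified_name(const char* name, bool is_class_name) {
  for (const char* p = name; *p != '\0'; ++p) {
    const char ch = *p;
    if (ch == '.' || ch == ';' || ch == '[') return false;
    if (!is_class_name && ch == '/')         return false;
  }
  return true;
}

int main() {
  std::printf("%d\n", verify_unqualified_name("foo[bar", false));          // 0: '[' now rejected
  std::printf("%d\n", verify_unqualified_name("java/lang/Object", true));  // 1: '/' ok in class names
  std::printf("%d\n", verify_unqualified_name("doIt", false));             // 1
  return 0;
}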
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/stackMapTableFormat.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -0,0 +1,916 @@
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// These classes represent the stack-map substructures described in the JVMS
+// (hence the non-conforming naming scheme).
+
+// These classes work with the types in their compressed form in-place (as they
+// would appear in the classfile).  No virtual methods or fields allowed.
+
+class verification_type_info {
+ private:
+  // u1 tag
+  // u2 cpool_index || u2 bci (for ITEM_Object & ITEM_Uninitialized only)
+
+  address tag_addr() const { return (address)this; }
+  address cpool_index_addr() const { return tag_addr() + sizeof(u1); }
+  address bci_addr() const { return cpool_index_addr(); }
+
+ protected:
+  // No constructors  - should be 'private', but GCC issues a warning if it is
+  verification_type_info() {}
+  verification_type_info(const verification_type_info&) {}
+
+ public:
+
+  static verification_type_info* at(address addr) {
+    return (verification_type_info*)addr;
+  }
+
+  static verification_type_info* create_at(address addr, u1 tag) {
+    verification_type_info* vti = (verification_type_info*)addr;
+    vti->set_tag(tag);
+    return vti;
+  }
+
+  static verification_type_info* create_object_at(address addr, u2 cp_idx) {
+    verification_type_info* vti = (verification_type_info*)addr;
+    vti->set_tag(ITEM_Object);
+    vti->set_cpool_index(cp_idx);
+    return vti;
+  }
+
+  static verification_type_info* create_uninit_at(address addr, u2 bci) {
+    verification_type_info* vti = (verification_type_info*)addr;
+    vti->set_tag(ITEM_Uninitialized);
+    vti->set_bci(bci);
+    return vti;
+  }
+
+  static size_t calculate_size(u1 tag) {
+    if (tag == ITEM_Object || tag == ITEM_Uninitialized) {
+      return sizeof(u1) + sizeof(u2);
+    } else {
+      return sizeof(u1);
+    }
+  }
+
+  static size_t max_size() { return sizeof(u1) + sizeof(u2); }
+
+  u1 tag() const { return *(u1*)tag_addr(); }
+  void set_tag(u1 tag) { *((u1*)tag_addr()) = tag; }
+
+  bool is_object() const { return tag() == ITEM_Object; }
+  bool is_uninitialized() const { return tag() == ITEM_Uninitialized; }
+
+  u2 cpool_index() const {
+    assert(is_object(), "This type has no cp_index");
+    return Bytes::get_Java_u2(cpool_index_addr());
+  }
+  void set_cpool_index(u2 idx) {
+    assert(is_object(), "This type has no cp_index");
+    Bytes::put_Java_u2(cpool_index_addr(), idx);
+  }
+
+  u2 bci() const {
+    assert(is_uninitialized(), "This type has no bci");
+    return Bytes::get_Java_u2(bci_addr());
+  }
+
+  void set_bci(u2 bci) {
+    assert(is_uninitialized(), "This type has no bci");
+    Bytes::put_Java_u2(bci_addr(), bci);
+  }
+
+  void copy_from(verification_type_info* from) {
+    set_tag(from->tag());
+    if (from->is_object()) {
+      set_cpool_index(from->cpool_index());
+    } else if (from->is_uninitialized()) {
+      set_bci(from->bci());
+    }
+  }
+
+  size_t size() const {
+    return calculate_size(tag());
+  }
+
+  verification_type_info* next() {
+    return (verification_type_info*)((address)this + size());
+  }
+
+  // This method is used when reading unverified data in order to ensure
+  // that we don't read past a particular memory limit.  It returns false
+  // if any part of the data structure is outside the specified memory bounds.
+  bool verify(address start, address end) {
+    return ((address)this >= start &&
+            (address)this < end &&
+            (bci_addr() + sizeof(u2) <= end ||
+                (!is_object() && !is_uninitialized())));
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) {
+    switch (tag()) {
+      case ITEM_Top: st->print("Top"); break;
+      case ITEM_Integer: st->print("Integer"); break;
+      case ITEM_Float: st->print("Float"); break;
+      case ITEM_Double: st->print("Double"); break;
+      case ITEM_Long: st->print("Long"); break;
+      case ITEM_Null: st->print("Null"); break;
+      case ITEM_UninitializedThis:
+        st->print("UninitializedThis"); break;
+      case ITEM_Uninitialized:
+        st->print("Uninitialized[#%d]", bci()); break;
+      case ITEM_Object:
+        st->print("Object[#%d]", cpool_index()); break;
+      default:
+        assert(false, "Bad verification_type_info");
+    }
+  }
+#endif
+};
+
+#define FOR_EACH_STACKMAP_FRAME_TYPE(macro, arg1, arg2) \
+  macro(same_frame, arg1, arg2) \
+  macro(same_frame_extended, arg1, arg2) \
+  macro(same_frame_1_stack_item_frame, arg1, arg2) \
+  macro(same_frame_1_stack_item_extended, arg1, arg2) \
+  macro(chop_frame, arg1, arg2) \
+  macro(append_frame, arg1, arg2) \
+  macro(full_frame, arg1, arg2)
+
+#define SM_FORWARD_DECL(type, arg1, arg2) class type;
+FOR_EACH_STACKMAP_FRAME_TYPE(SM_FORWARD_DECL, x, x)
+#undef SM_FORWARD_DECL
+
+class stack_map_frame {
+ protected:
+  address frame_type_addr() const { return (address)this; }
+
+  // No constructors  - should be 'private', but GCC issues a warning if it is
+  stack_map_frame() {}
+  stack_map_frame(const stack_map_frame&) {}
+
+ public:
+
+  static stack_map_frame* at(address addr) {
+    return (stack_map_frame*)addr;
+  }
+
+  stack_map_frame* next() const {
+    return at((address)this + size());
+  }
+
+  u1 frame_type() const { return *(u1*)frame_type_addr(); }
+  void set_frame_type(u1 type) { *((u1*)frame_type_addr()) = type; }
+
+  // pseudo-virtual methods
+  inline size_t size() const;
+  inline int offset_delta() const;
+  inline void set_offset_delta(int offset_delta);
+  inline int number_of_types() const; // number of types contained in the frame
+  inline verification_type_info* types() const; // pointer to first type
+  inline bool is_valid_offset(int offset_delta) const;
+
+  // This method must be used when reading unverified data in order to ensure
+  // that we don't read past a particular memory limit.  It returns false
+  // if any part of the data structure is outside the specified memory bounds.
+  inline bool verify(address start, address end) const;
+#ifdef ASSERT
+  inline void print_on(outputStream* st) const;
+#endif
+
+  // Create as_xxx and is_xxx methods for the subtypes
+#define FRAME_TYPE_DECL(stackmap_frame_type, arg1, arg2) \
+  inline stackmap_frame_type* as_##stackmap_frame_type() const; \
+  bool is_##stackmap_frame_type() { \
+    return as_##stackmap_frame_type() != NULL; \
+  }
+
+  FOR_EACH_STACKMAP_FRAME_TYPE(FRAME_TYPE_DECL, x, x)
+#undef FRAME_TYPE_DECL
+};
+
+class same_frame : public stack_map_frame {
+ private:
+  static int frame_type_to_offset_delta(u1 frame_type) {
+      return frame_type + 1; }
+  static u1 offset_delta_to_frame_type(int offset_delta) {
+      return (u1)(offset_delta - 1); }
+
+ public:
+
+  static bool is_frame_type(u1 tag) {
+    return tag < 64;
+  }
+
+  static same_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (same_frame*)addr;
+  }
+
+  static same_frame* create_at(address addr, int offset_delta) {
+    same_frame* sm = (same_frame*)addr;
+    sm->set_offset_delta(offset_delta);
+    return sm;
+  }
+
+  static size_t calculate_size() { return sizeof(u1); }
+
+  size_t size() const { return calculate_size(); }
+  int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
+
+  void set_offset_delta(int offset_delta) {
+    assert(offset_delta <= 64, "Offset too large for same_frame");
+    set_frame_type(offset_delta_to_frame_type(offset_delta));
+  }
+
+  int number_of_types() const { return 0; }
+  verification_type_info* types() const { return NULL; }
+
+  bool is_valid_offset(int offset_delta) const {
+    return is_frame_type(offset_delta_to_frame_type(offset_delta));
+  }
+
+  bool verify_subtype(address start, address end) const {
+    return true;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("same_frame(%d)", offset_delta());
+  }
+#endif
+};
+
+class same_frame_extended : public stack_map_frame {
+ private:
+  enum { _frame_id = 251 };
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return tag == _frame_id;
+  }
+
+  static same_frame_extended* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame type");
+    return (same_frame_extended*)addr;
+  }
+
+  static same_frame_extended* create_at(address addr, u2 offset_delta) {
+    same_frame_extended* sm = (same_frame_extended*)addr;
+    sm->set_frame_type(_frame_id);
+    sm->set_offset_delta(offset_delta);
+    return sm;
+  }
+
+  static size_t calculate_size() { return sizeof(u1) + sizeof(u2); }
+
+  size_t size() const { return calculate_size(); }
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+
+  int number_of_types() const { return 0; }
+  verification_type_info* types() const { return NULL; }
+  bool is_valid_offset(int offset) const { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    return frame_type_addr() + size() <= end;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("same_frame_extended(%d)", offset_delta());
+  }
+#endif
+};
+
+class same_frame_1_stack_item_frame : public stack_map_frame {
+ private:
+  address type_addr() const { return frame_type_addr() + sizeof(u1); }
+
+  static int frame_type_to_offset_delta(u1 frame_type) {
+      return frame_type - 63; }
+  static u1 offset_delta_to_frame_type(int offset_delta) {
+      return (u1)(offset_delta + 63); }
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return tag >= 64 && tag < 128;
+  }
+
+  static same_frame_1_stack_item_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (same_frame_1_stack_item_frame*)addr;
+  }
+
+  static same_frame_1_stack_item_frame* create_at(
+      address addr, int offset_delta, verification_type_info* vti) {
+    same_frame_1_stack_item_frame* sm = (same_frame_1_stack_item_frame*)addr;
+    sm->set_offset_delta(offset_delta);
+    if (vti != NULL) {
+      sm->set_type(vti);
+    }
+    return sm;
+  }
+
+  static size_t calculate_size(verification_type_info* vti) {
+    return sizeof(u1) + vti->size();
+  }
+
+  static size_t max_size() {
+    return sizeof(u1) + verification_type_info::max_size();
+  }
+
+  size_t size() const { return calculate_size(types()); }
+  int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
+
+  void set_offset_delta(int offset_delta) {
+    assert(offset_delta > 0 && offset_delta <= 64,
+           "Offset out of range for this frame type");
+    set_frame_type(offset_delta_to_frame_type(offset_delta));
+  }
+
+  void set_type(verification_type_info* vti) {
+    verification_type_info* cur = types();
+    cur->copy_from(vti);
+  }
+
+  int number_of_types() const { return 1; }
+  verification_type_info* types() const {
+    return verification_type_info::at(type_addr());
+  }
+
+  bool is_valid_offset(int offset_delta) const {
+    return is_frame_type(offset_delta_to_frame_type(offset_delta));
+  }
+
+  bool verify_subtype(address start, address end) const {
+    return types()->verify(start, end);
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("same_frame_1_stack_item_frame(%d,", offset_delta());
+    types()->print_on(st);
+    st->print(")");
+  }
+#endif
+};
+
+class same_frame_1_stack_item_extended : public stack_map_frame {
+ private:
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+  address type_addr() const { return offset_delta_addr() + sizeof(u2); }
+
+  enum { _frame_id = 247 };
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return tag == _frame_id;
+  }
+
+  static same_frame_1_stack_item_extended* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (same_frame_1_stack_item_extended*)addr;
+  }
+
+  static same_frame_1_stack_item_extended* create_at(
+      address addr, int offset_delta, verification_type_info* vti) {
+    same_frame_1_stack_item_extended* sm =
+       (same_frame_1_stack_item_extended*)addr;
+    sm->set_frame_type(_frame_id);
+    sm->set_offset_delta(offset_delta);
+    if (vti != NULL) {
+      sm->set_type(vti);
+    }
+    return sm;
+  }
+
+  static size_t calculate_size(verification_type_info* vti) {
+    return sizeof(u1) + sizeof(u2) + vti->size();
+  }
+
+  size_t size() const { return calculate_size(types()); }
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+
+  void set_type(verification_type_info* vti) {
+    verification_type_info* cur = types();
+    cur->copy_from(vti);
+  }
+
+  int number_of_types() const { return 1; }
+  verification_type_info* types() const {
+    return verification_type_info::at(type_addr());
+  }
+  bool is_valid_offset(int offset) { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    return type_addr() < end && types()->verify(start, end);
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("same_frame_1_stack_item_extended(%d,", offset_delta());
+    types()->print_on(st);
+    st->print(")");
+  }
+#endif
+};
+
+class chop_frame : public stack_map_frame {
+ private:
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+
+  static int frame_type_to_chops(u1 frame_type) {
+    int chop = 251 - frame_type;
+    return chop;
+  }
+
+  static u1 chops_to_frame_type(int chop) {
+    return 251 - chop;
+  }
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return frame_type_to_chops(tag) > 0 && frame_type_to_chops(tag) < 4;
+  }
+
+  static chop_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (chop_frame*)addr;
+  }
+
+  static chop_frame* create_at(address addr, int offset_delta, int chops) {
+    chop_frame* sm = (chop_frame*)addr;
+    sm->set_chops(chops);
+    sm->set_offset_delta(offset_delta);
+    return sm;
+  }
+
+  static size_t calculate_size() {
+    return sizeof(u1) + sizeof(u2);
+  }
+
+  size_t size() const { return calculate_size(); }
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+
+  int chops() const {
+    int chops = frame_type_to_chops(frame_type());
+    assert(chops > 0 && chops < 4, "Invalid number of chops in frame");
+    return chops;
+  }
+  void set_chops(int chops) {
+    assert(chops > 0 && chops <= 3, "Bad number of chops");
+    set_frame_type(chops_to_frame_type(chops));
+  }
+
+  int number_of_types() const { return 0; }
+  verification_type_info* types() const { return NULL; }
+  bool is_valid_offset(int offset) { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    return frame_type_addr() + size() <= end;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("chop_frame(%d,%d)", offset_delta(), chops());
+  }
+#endif
+};
+
+class append_frame : public stack_map_frame {
+ private:
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+  address types_addr() const { return offset_delta_addr() + sizeof(u2); }
+
+  static int frame_type_to_appends(u1 frame_type) {
+    int append = frame_type - 251;
+    return append;
+  }
+
+  static u1 appends_to_frame_type(int appends) {
+    assert(appends > 0 && appends < 4, "Invalid append amount");
+    return 251 + appends;
+  }
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return frame_type_to_appends(tag) > 0 && frame_type_to_appends(tag) < 4;
+  }
+
+  static append_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (append_frame*)addr;
+  }
+
+  static append_frame* create_at(
+      address addr, int offset_delta, int appends,
+      verification_type_info* types) {
+    append_frame* sm = (append_frame*)addr;
+    sm->set_appends(appends);
+    sm->set_offset_delta(offset_delta);
+    if (types != NULL) {
+      verification_type_info* cur = sm->types();
+      for (int i = 0; i < appends; ++i) {
+        cur->copy_from(types);
+        cur = cur->next();
+        types = types->next();
+      }
+    }
+    return sm;
+  }
+
+  static size_t calculate_size(int appends, verification_type_info* types) {
+    size_t sz = sizeof(u1) + sizeof(u2);
+    for (int i = 0; i < appends; ++i) {
+      sz += types->size();
+      types = types->next();
+    }
+    return sz;
+  }
+
+  static size_t max_size() {
+    return sizeof(u1) + sizeof(u2) + 3 * verification_type_info::max_size();
+  }
+
+  size_t size() const { return calculate_size(number_of_types(), types()); }
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+
+  void set_appends(int appends) {
+    assert(appends > 0 && appends < 4, "Bad number of appends");
+    set_frame_type(appends_to_frame_type(appends));
+  }
+
+  int number_of_types() const {
+    int appends = frame_type_to_appends(frame_type());
+    assert(appends > 0 && appends < 4, "Invalid number of appends in frame");
+    return appends;
+  }
+  verification_type_info* types() const {
+    return verification_type_info::at(types_addr());
+  }
+  bool is_valid_offset(int offset) const { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    verification_type_info* vti = types();
+    if ((address)vti < end && vti->verify(start, end)) {
+      int nof = number_of_types();
+      vti = vti->next();
+      if (nof < 2 || vti->verify(start, end)) {
+        vti = vti->next();
+        if (nof < 3 || vti->verify(start, end)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("append_frame(%d,", offset_delta());
+    verification_type_info* vti = types();
+    for (int i = 0; i < number_of_types(); ++i) {
+      vti->print_on(st);
+      if (i != number_of_types() - 1) {
+        st->print(",");
+      }
+      vti = vti->next();
+    }
+    st->print(")");
+  }
+#endif
+};
+
+class full_frame : public stack_map_frame {
+ private:
+  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
+  address num_locals_addr() const { return offset_delta_addr() + sizeof(u2); }
+  address locals_addr() const { return num_locals_addr() + sizeof(u2); }
+  address stack_slots_addr(address end_of_locals) const {
+      return end_of_locals; }
+  address stack_addr(address end_of_locals) const {
+      return stack_slots_addr(end_of_locals) + sizeof(u2); }
+
+  enum { _frame_id = 255 };
+
+ public:
+  static bool is_frame_type(u1 tag) {
+    return tag == _frame_id;
+  }
+
+  static full_frame* at(address addr) {
+    assert(is_frame_type(*addr), "Wrong frame id");
+    return (full_frame*)addr;
+  }
+
+  static full_frame* create_at(
+      address addr, int offset_delta, int num_locals,
+      verification_type_info* locals,
+      int stack_slots, verification_type_info* stack) {
+    full_frame* sm = (full_frame*)addr;
+    sm->set_frame_type(_frame_id);
+    sm->set_offset_delta(offset_delta);
+    sm->set_num_locals(num_locals);
+    if (locals != NULL) {
+      verification_type_info* cur = sm->locals();
+      for (int i = 0; i < num_locals; ++i) {
+        cur->copy_from(locals);
+        cur = cur->next();
+        locals = locals->next();
+      }
+      address end_of_locals = (address)cur;
+      sm->set_stack_slots(end_of_locals, stack_slots);
+      cur = sm->stack(end_of_locals);
+      for (int i = 0; i < stack_slots; ++i) {
+        cur->copy_from(stack);
+        cur = cur->next();
+        stack = stack->next();
+      }
+    }
+    return sm;
+  }
+
+  static size_t calculate_size(
+      int num_locals, verification_type_info* locals,
+      int stack_slots, verification_type_info* stack) {
+    size_t sz = sizeof(u1) + sizeof(u2) + sizeof(u2) + sizeof(u2);
+    verification_type_info* vti = locals;
+    for (int i = 0; i < num_locals; ++i) {
+      sz += vti->size();
+      vti = vti->next();
+    }
+    vti = stack;
+    for (int i = 0; i < stack_slots; ++i) {
+      sz += vti->size();
+      vti = vti->next();
+    }
+    return sz;
+  }
+
+  static size_t max_size(int locals, int stack) {
+    return sizeof(u1) + 3 * sizeof(u2) +
+        (locals + stack) * verification_type_info::max_size();
+  }
+
+  size_t size() const {
+    address eol = end_of_locals();
+    return calculate_size(num_locals(), locals(), stack_slots(eol), stack(eol));
+  }
+
+  int offset_delta() const {
+    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
+  }
+  int num_locals() const { return Bytes::get_Java_u2(num_locals_addr()); }
+  verification_type_info* locals() const {
+    return verification_type_info::at(locals_addr());
+  }
+  address end_of_locals() const {
+    verification_type_info* vti = locals();
+    for (int i = 0; i < num_locals(); ++i) {
+      vti = vti->next();
+    }
+    return (address)vti;
+  }
+  int stack_slots(address end_of_locals) const {
+    return Bytes::get_Java_u2(stack_slots_addr(end_of_locals));
+  }
+  verification_type_info* stack(address end_of_locals) const {
+    return verification_type_info::at(stack_addr(end_of_locals));
+  }
+
+  void set_offset_delta(int offset_delta) {
+    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
+  }
+  void set_num_locals(int num_locals) {
+    Bytes::put_Java_u2(num_locals_addr(), num_locals);
+  }
+  void set_stack_slots(address end_of_locals, int stack_slots) {
+    Bytes::put_Java_u2(stack_slots_addr(end_of_locals), stack_slots);
+  }
+
+  // These return only the locals.  Extra processing is required for stack
+  // types of full frames.
+  int number_of_types() const { return num_locals(); }
+  verification_type_info* types() const { return locals(); }
+  bool is_valid_offset(int offset) { return true; }
+
+  bool verify_subtype(address start, address end) const {
+    verification_type_info* vti = types();
+    if ((address)vti >= end) {
+      return false;
+    }
+    int count = number_of_types();
+    for (int i = 0; i < count; ++i) {
+      if (!vti->verify(start, end)) {
+        return false;
+      }
+      vti = vti->next();
+    }
+    address eol = (address)vti;
+    if (eol + sizeof(u2) > end) {
+      return false;
+    }
+    count = stack_slots(eol);
+    vti = stack(eol);
+    for (int i = 0; i < count; ++i) {
+      if (!vti->verify(start, end)) {
+        return false;
+      }
+      vti = vti->next();
+    }
+    return true;
+  }
+
+#ifdef ASSERT
+  void print_on(outputStream* st) const {
+    st->print("full_frame(%d,{", offset_delta());
+    verification_type_info* vti = locals();
+    for (int i = 0; i < num_locals(); ++i) {
+      vti->print_on(st);
+      if (i != num_locals() - 1) {
+        st->print(",");
+      }
+      vti = vti->next();
+    }
+    st->print("},{");
+    address end_of_locals = (address)vti;
+    vti = stack(end_of_locals);
+    int ss = stack_slots(end_of_locals);
+    for (int i = 0; i < ss; ++i) {
+      vti->print_on(st);
+      if (i != ss - 1) {
+        st->print(",");
+      }
+      vti = vti->next();
+    }
+    st->print("})");
+  }
+#endif
+};
+
+#define VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
+  stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
+  if (item_##stack_frame_type != NULL) { \
+    return item_##stack_frame_type->func_name args;  \
+  }
+
+#define VOID_VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
+  stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
+  if (item_##stack_frame_type != NULL) { \
+    item_##stack_frame_type->func_name args;  \
+    return; \
+  }
+
+size_t stack_map_frame::size() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, size, ());
+  return 0;
+}
+
+int stack_map_frame::offset_delta() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, offset_delta, ());
+  return 0;
+}
+
+void stack_map_frame::set_offset_delta(int offset_delta) {
+  FOR_EACH_STACKMAP_FRAME_TYPE(
+      VOID_VIRTUAL_DISPATCH, set_offset_delta, (offset_delta));
+}
+
+int stack_map_frame::number_of_types() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, number_of_types, ());
+  return 0;
+}
+
+verification_type_info* stack_map_frame::types() const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, types, ());
+  return NULL;
+}
+
+bool stack_map_frame::is_valid_offset(int offset) const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, is_valid_offset, (offset));
+  return true;
+}
+
+bool stack_map_frame::verify(address start, address end) const {
+  if (frame_type_addr() >= start && frame_type_addr() < end) {
+    FOR_EACH_STACKMAP_FRAME_TYPE(
+       VIRTUAL_DISPATCH, verify_subtype, (start, end));
+  }
+  return false;
+}
+
+#ifdef ASSERT
+void stack_map_frame::print_on(outputStream* st) const {
+  FOR_EACH_STACKMAP_FRAME_TYPE(VOID_VIRTUAL_DISPATCH, print_on, (st));
+}
+#endif
+
+#undef VIRTUAL_DISPATCH
+#undef VOID_VIRTUAL_DISPATCH
+
+#define AS_SUBTYPE_DEF(stack_frame_type, arg1, arg2) \
+stack_frame_type* stack_map_frame::as_##stack_frame_type() const { \
+  if (stack_frame_type::is_frame_type(frame_type())) { \
+    return (stack_frame_type*)this; \
+  } else { \
+    return NULL; \
+  } \
+}
+
+FOR_EACH_STACKMAP_FRAME_TYPE(AS_SUBTYPE_DEF, x, x)
+#undef AS_SUBTYPE_DEF
+
+class stack_map_table_attribute {
+ private:
+  address name_index_addr() const {
+      return (address)this; }
+  address attribute_length_addr() const {
+      return name_index_addr() + sizeof(u2); }
+  address number_of_entries_addr() const {
+      return attribute_length_addr() + sizeof(u4); }
+  address entries_addr() const {
+      return number_of_entries_addr() + sizeof(u2); }
+
+ protected:
+  // No constructors  - should be 'private', but GCC issues a warning if it is
+  stack_map_table_attribute() {}
+  stack_map_table_attribute(const stack_map_table_attribute&) {}
+
+ public:
+
+  static stack_map_table_attribute* at(address addr) {
+    return (stack_map_table_attribute*)addr;
+  }
+
+  u2 name_index() const {
+       return Bytes::get_Java_u2(name_index_addr()); }
+  u4 attribute_length() const {
+      return Bytes::get_Java_u4(attribute_length_addr()); }
+  u2 number_of_entries() const {
+      return Bytes::get_Java_u2(number_of_entries_addr()); }
+  stack_map_frame* entries() const {
+    return stack_map_frame::at(entries_addr());
+  }
+
+  static size_t header_size() {
+      return sizeof(u2) + sizeof(u4);
+  }
+
+  void set_name_index(u2 idx) {
+    Bytes::put_Java_u2(name_index_addr(), idx);
+  }
+  void set_attribute_length(u4 len) {
+    Bytes::put_Java_u4(attribute_length_addr(), len);
+  }
+  void set_number_of_entries(u2 num) {
+    Bytes::put_Java_u2(number_of_entries_addr(), num);
+  }
+};
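To make the in-place layout handled by the new stackMapTableFormat.hpp classes concrete, here is a standalone sketch that walks a hand-built StackMapTable body the way stack_map_frame::next() and offset_delta() do. It deliberately handles only same_frame and same_frame_extended, and the big-endian helper stands in for Bytes::get_Java_u2.

#include <cstdint>
#include <cstdio>
#include <vector>

// Big-endian u2, mirroring what Bytes::get_Java_u2 returns for classfile data.
static uint16_t get_u2(const uint8_t* p) {
  return static_cast<uint16_t>((p[0] << 8) | p[1]);
}

int main() {
  // number_of_entries = 2, then same_frame (frame_type 2) and
  // same_frame_extended (frame_type 251, u2 offset_delta 69).
  const std::vector<uint8_t> body = { 0x00, 0x02,
                                      0x02,
                                      0xFB, 0x00, 0x45 };
  const uint8_t* p = body.data();
  const uint16_t entries = get_u2(p);
  p += 2;

  int bci = -1;  // the classes above fold the JVMS "+1" into offset_delta()
  for (uint16_t i = 0; i < entries; ++i) {
    const uint8_t frame_type = *p;
    int delta, size;
    if (frame_type < 64) {              // same_frame
      delta = frame_type + 1;
      size  = 1;
    } else if (frame_type == 251) {     // same_frame_extended
      delta = get_u2(p + 1) + 1;
      size  = 3;
    } else {
      std::puts("frame type not handled in this sketch");
      return 1;
    }
    bci += delta;                       // offset_delta() accumulation
    std::printf("entry %u: frame_type %u, bci %d\n", i, frame_type, bci);
    p += size;                          // what stack_map_frame::next() does
  }
  return 0;
}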
--- a/src/share/vm/code/nmethod.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/code/nmethod.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1421,7 +1421,7 @@
   }
 
 #ifdef SHARK
-  ((SharkCompiler *) compiler())->free_compiled_method(instructions_begin());
+  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
 #endif // SHARK
 
   ((CodeBlob*)(this))->flush();
--- a/src/share/vm/includeDB_compiler1	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/includeDB_compiler1	Mon Nov 01 10:49:14 2010 -0700
@@ -301,6 +301,7 @@
 c1_MacroAssembler.hpp                   assembler_<arch>.inline.hpp
 
 c1_MacroAssembler_<arch>.cpp            arrayOop.hpp
+c1_MacroAssembler_<arch>.cpp            basicLock.hpp
 c1_MacroAssembler_<arch>.cpp            biasedLocking.hpp
 c1_MacroAssembler_<arch>.cpp            c1_MacroAssembler.hpp
 c1_MacroAssembler_<arch>.cpp            c1_Runtime1.hpp
@@ -309,7 +310,6 @@
 c1_MacroAssembler_<arch>.cpp            markOop.hpp
 c1_MacroAssembler_<arch>.cpp            os.hpp
 c1_MacroAssembler_<arch>.cpp            stubRoutines.hpp
-c1_MacroAssembler_<arch>.cpp            synchronizer.hpp
 c1_MacroAssembler_<arch>.cpp            systemDictionary.hpp
 
 c1_MacroAssembler_<arch>.hpp            generate_platform_dependent_include
--- a/src/share/vm/includeDB_core	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/includeDB_core	Mon Nov 01 10:49:14 2010 -0700
@@ -300,10 +300,17 @@
 barrierSet.inline.hpp                   barrierSet.hpp
 barrierSet.inline.hpp                   cardTableModRefBS.hpp
 
+basicLock.cpp                           basicLock.hpp
+basicLock.cpp                           synchronizer.hpp
+
+basicLock.hpp                           handles.hpp
+basicLock.hpp                           markOop.hpp
+basicLock.hpp                           top.hpp
+
+biasedLocking.cpp                       basicLock.hpp
 biasedLocking.cpp                       biasedLocking.hpp
 biasedLocking.cpp                       klass.inline.hpp
 biasedLocking.cpp                       markOop.hpp
-biasedLocking.cpp                       synchronizer.hpp
 biasedLocking.cpp                       task.hpp
 biasedLocking.cpp                       vframe.hpp
 biasedLocking.cpp                       vmThread.hpp
@@ -404,13 +411,13 @@
 bytecodeInterpreterWithChecks.cpp       bytecodeInterpreter.cpp
 
 bytecodeInterpreter.hpp                 allocation.hpp
+bytecodeInterpreter.hpp                 basicLock.hpp
 bytecodeInterpreter.hpp                 bytes_<arch>.hpp
 bytecodeInterpreter.hpp                 frame.hpp
 bytecodeInterpreter.hpp                 globalDefinitions.hpp
 bytecodeInterpreter.hpp                 globals.hpp
 bytecodeInterpreter.hpp                 methodDataOop.hpp
 bytecodeInterpreter.hpp                 methodOop.hpp
-bytecodeInterpreter.hpp                 synchronizer.hpp
 
 bytecodeInterpreter.inline.hpp          bytecodeInterpreter.hpp
 bytecodeInterpreter.inline.hpp          stubRoutines.hpp
@@ -1667,10 +1674,10 @@
 frame.cpp                               universe.inline.hpp
 
 frame.hpp                               assembler.hpp
+frame.hpp                               basicLock.hpp
 frame.hpp                               methodOop.hpp
 frame.hpp                               monitorChunk.hpp
 frame.hpp                               registerMap.hpp
-frame.hpp                               synchronizer.hpp
 frame.hpp                               top.hpp
 
 frame.inline.hpp                        bytecodeInterpreter.hpp
@@ -2120,6 +2127,7 @@
 interfaceSupport_<os_family>.hpp        generate_platform_dependent_include
 
 interp_masm_<arch_model>.cpp            arrayOop.hpp
+interp_masm_<arch_model>.cpp            basicLock.hpp
 interp_masm_<arch_model>.cpp            biasedLocking.hpp
 interp_masm_<arch_model>.cpp            interp_masm_<arch_model>.hpp
 interp_masm_<arch_model>.cpp            interpreterRuntime.hpp
@@ -2131,7 +2139,6 @@
 interp_masm_<arch_model>.cpp            methodDataOop.hpp
 interp_masm_<arch_model>.cpp            methodOop.hpp
 interp_masm_<arch_model>.cpp            sharedRuntime.hpp
-interp_masm_<arch_model>.cpp            synchronizer.hpp
 interp_masm_<arch_model>.cpp            thread_<os_family>.inline.hpp
 
 interp_masm_<arch_model>.hpp            assembler_<arch>.inline.hpp
@@ -3094,25 +3101,26 @@
 
 objArrayOop.hpp                         arrayOop.hpp
 
+objectMonitor.cpp                       dtrace.hpp
+objectMonitor.cpp                       handles.inline.hpp
+objectMonitor.cpp                       interfaceSupport.hpp
+objectMonitor.cpp                       markOop.hpp
+objectMonitor.cpp                       mutexLocker.hpp
+objectMonitor.cpp                       objectMonitor.hpp
+objectMonitor.cpp                       objectMonitor.inline.hpp
+objectMonitor.cpp                       oop.inline.hpp
+objectMonitor.cpp                       osThread.hpp
+objectMonitor.cpp                       os_<os_family>.inline.hpp
+objectMonitor.cpp                       preserveException.hpp
+objectMonitor.cpp                       resourceArea.hpp
+objectMonitor.cpp                       stubRoutines.hpp
+objectMonitor.cpp                       thread.hpp
+objectMonitor.cpp                       thread_<os_family>.inline.hpp
+objectMonitor.cpp                       threadService.hpp
+objectMonitor.cpp                       vmSymbols.hpp
+
 objectMonitor.hpp                       os.hpp
-
-objectMonitor_<os_family>.cpp           dtrace.hpp
-objectMonitor_<os_family>.cpp           interfaceSupport.hpp
-objectMonitor_<os_family>.cpp           objectMonitor.hpp
-objectMonitor_<os_family>.cpp           objectMonitor.inline.hpp
-objectMonitor_<os_family>.cpp           oop.inline.hpp
-objectMonitor_<os_family>.cpp           osThread.hpp
-objectMonitor_<os_family>.cpp           os_<os_family>.inline.hpp
-objectMonitor_<os_family>.cpp           threadService.hpp
-objectMonitor_<os_family>.cpp           thread_<os_family>.inline.hpp
-objectMonitor_<os_family>.cpp           vmSymbols.hpp
-
-objectMonitor_<os_family>.hpp           generate_platform_dependent_include
-objectMonitor_<os_family>.hpp           os_<os_family>.inline.hpp
-objectMonitor_<os_family>.hpp           thread_<os_family>.inline.hpp
-objectMonitor_<os_family>.hpp           top.hpp
-
-objectMonitor_<os_family>.inline.hpp    generate_platform_dependent_include
+objectMonitor.hpp                       perfData.hpp
 
 oop.cpp                                 copy.hpp
 oop.cpp                                 handles.inline.hpp
@@ -3231,6 +3239,7 @@
 orderAccess.hpp                         os.hpp
 
 orderAccess_<os_arch>.inline.hpp        orderAccess.hpp
+orderAccess_<os_arch>.inline.hpp        vm_version_<arch>.hpp
 
 os.cpp                                  allocation.inline.hpp
 os.cpp                                  arguments.hpp
@@ -3328,7 +3337,6 @@
 os_<os_family>.cpp                      nativeInst_<arch>.hpp
 os_<os_family>.cpp                      no_precompiled_headers
 os_<os_family>.cpp                      objectMonitor.hpp
-os_<os_family>.cpp                      objectMonitor.inline.hpp
 os_<os_family>.cpp                      oop.inline.hpp
 os_<os_family>.cpp                      osThread.hpp
 os_<os_family>.cpp                      os_share_<os_family>.hpp
@@ -3388,6 +3396,12 @@
 ostream.hpp                             allocation.hpp
 ostream.hpp                             timer.hpp
 
+// include thread.hpp to prevent cyclic includes
+park.cpp                                thread.hpp
+
+park.hpp                                debug.hpp
+park.hpp                                globalDefinitions.hpp
+
 pcDesc.cpp                              debugInfoRec.hpp
 pcDesc.cpp                              nmethod.hpp
 pcDesc.cpp                              pcDesc.hpp
@@ -3600,7 +3614,9 @@
 relocator.cpp                           bytecodes.hpp
 relocator.cpp                           handles.inline.hpp
 relocator.cpp                           oop.inline.hpp
+relocator.cpp                           oopFactory.hpp
 relocator.cpp                           relocator.hpp
+relocator.cpp                           stackMapTableFormat.hpp
 relocator.cpp                           universe.inline.hpp
 
 relocator.hpp                           bytecodes.hpp
@@ -3907,6 +3923,8 @@
 stackMapTable.hpp                       methodOop.hpp
 stackMapTable.hpp                       stackMapFrame.hpp
 
+stackMapTableFormat.hpp                 verificationType.hpp
+
 stackValue.cpp                          debugInfo.hpp
 stackValue.cpp                          frame.inline.hpp
 stackValue.cpp                          handles.inline.hpp
@@ -4062,10 +4080,10 @@
 synchronizer.cpp                        resourceArea.hpp
 synchronizer.cpp                        stubRoutines.hpp
 synchronizer.cpp                        synchronizer.hpp
-synchronizer.cpp                        threadService.hpp
 synchronizer.cpp                        thread_<os_family>.inline.hpp
 synchronizer.cpp                        vmSymbols.hpp
 
+synchronizer.hpp                        basicLock.hpp
 synchronizer.hpp                        handles.hpp
 synchronizer.hpp                        markOop.hpp
 synchronizer.hpp                        perfData.hpp
@@ -4237,7 +4255,6 @@
 thread.cpp                              mutexLocker.hpp
 thread.cpp                              objArrayOop.hpp
 thread.cpp                              objectMonitor.hpp
-thread.cpp                              objectMonitor.inline.hpp
 thread.cpp                              oop.inline.hpp
 thread.cpp                              oopFactory.hpp
 thread.cpp                              osThread.hpp
@@ -4275,6 +4292,7 @@
 thread.hpp                              oop.hpp
 thread.hpp                              os.hpp
 thread.hpp                              osThread.hpp
+thread.hpp                              park.hpp
 thread.hpp                              safepoint.hpp
 thread.hpp                              stubRoutines.hpp
 thread.hpp                              threadLocalAllocBuffer.hpp
@@ -4586,6 +4604,7 @@
 vframeArray.hpp                         growableArray.hpp
 vframeArray.hpp                         monitorChunk.hpp
 
+vframe_hp.cpp                           basicLock.hpp
 vframe_hp.cpp                           codeCache.hpp
 vframe_hp.cpp                           debugInfoRec.hpp
 vframe_hp.cpp                           handles.inline.hpp
@@ -4599,7 +4618,6 @@
 vframe_hp.cpp                           scopeDesc.hpp
 vframe_hp.cpp                           signature.hpp
 vframe_hp.cpp                           stubRoutines.hpp
-vframe_hp.cpp                           synchronizer.hpp
 vframe_hp.cpp                           vframeArray.hpp
 vframe_hp.cpp                           vframe_hp.hpp
 
@@ -4751,6 +4769,7 @@
 workgroup.cpp                           workgroup.hpp
 
 workgroup.hpp                           taskqueue.hpp
+
 workgroup.hpp                           thread_<os_family>.inline.hpp
 
 xmlstream.cpp                           allocation.hpp
--- a/src/share/vm/includeDB_features	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/includeDB_features	Mon Nov 01 10:49:14 2010 -0700
@@ -184,6 +184,13 @@
 jvmtiImpl.hpp                           systemDictionary.hpp
 jvmtiImpl.hpp                           vm_operations.hpp
 
+jvmtiRawMonitor.cpp                     interfaceSupport.hpp
+jvmtiRawMonitor.cpp                     jvmtiRawMonitor.hpp
+jvmtiRawMonitor.cpp                     thread.hpp
+
+jvmtiRawMonitor.hpp                     growableArray.hpp
+jvmtiRawMonitor.hpp                     objectMonitor.hpp
+
 jvmtiTagMap.cpp                         biasedLocking.hpp
 jvmtiTagMap.cpp                         javaCalls.hpp
 jvmtiTagMap.cpp                         jniHandles.hpp
--- a/src/share/vm/includeDB_jvmti	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/includeDB_jvmti	Mon Nov 01 10:49:14 2010 -0700
@@ -35,6 +35,7 @@
 // jvmtiCodeBlobEvents is jck optional, please put deps in includeDB_features
 
 jvmtiEnter.cpp                          jvmtiEnter.hpp
+jvmtiEnter.cpp                          jvmtiRawMonitor.hpp
 jvmtiEnter.cpp                          jvmtiUtil.hpp
 
 jvmtiEnter.hpp                          interfaceSupport.hpp
@@ -44,6 +45,7 @@
 jvmtiEnter.hpp                          systemDictionary.hpp
 
 jvmtiEnterTrace.cpp                     jvmtiEnter.hpp
+jvmtiEnterTrace.cpp                     jvmtiRawMonitor.hpp
 jvmtiEnterTrace.cpp                     jvmtiUtil.hpp
 
 jvmtiEnv.cpp                            arguments.hpp
@@ -66,11 +68,11 @@
 jvmtiEnv.cpp                            jvmtiGetLoadedClasses.hpp
 jvmtiEnv.cpp                            jvmtiImpl.hpp
 jvmtiEnv.cpp                            jvmtiManageCapabilities.hpp
+jvmtiEnv.cpp                            jvmtiRawMonitor.hpp
 jvmtiEnv.cpp                            jvmtiRedefineClasses.hpp
 jvmtiEnv.cpp                            jvmtiTagMap.hpp
 jvmtiEnv.cpp                            jvmtiThreadState.inline.hpp
 jvmtiEnv.cpp                            jvmtiUtil.hpp
-jvmtiEnv.cpp                            objectMonitor.inline.hpp
 jvmtiEnv.cpp                            osThread.hpp
 jvmtiEnv.cpp                            preserveException.hpp
 jvmtiEnv.cpp                            reflectionUtils.hpp
@@ -178,11 +180,13 @@
 jvmtiExport.cpp                         jvmtiExport.hpp
 jvmtiExport.cpp                         jvmtiImpl.hpp
 jvmtiExport.cpp                         jvmtiManageCapabilities.hpp
+jvmtiExport.cpp                         jvmtiRawMonitor.hpp
 jvmtiExport.cpp                         jvmtiTagMap.hpp
 jvmtiExport.cpp                         jvmtiThreadState.inline.hpp
 jvmtiExport.cpp                         nmethod.hpp
 jvmtiExport.cpp                         objArrayKlass.hpp
 jvmtiExport.cpp                         objArrayOop.hpp
+jvmtiExport.cpp                         objectMonitor.hpp
 jvmtiExport.cpp                         objectMonitor.inline.hpp
 jvmtiExport.cpp                         pcDesc.hpp
 jvmtiExport.cpp                         resourceArea.hpp
@@ -210,6 +214,8 @@
 jvmtiManageCapabilities.hpp             allocation.hpp
 jvmtiManageCapabilities.hpp             jvmti.h
 
+// jvmtiRawMonitor is jck optional, please put deps in includeDB_features
+
 jvmtiRedefineClasses.cpp                bitMap.inline.hpp
 jvmtiRedefineClasses.cpp                codeCache.hpp
 jvmtiRedefineClasses.cpp                deoptimization.hpp
--- a/src/share/vm/oops/methodOop.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/oops/methodOop.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -758,7 +758,7 @@
 
   OrderAccess::storestore();
 #ifdef SHARK
-  mh->_from_interpreted_entry = code->instructions_begin();
+  mh->_from_interpreted_entry = code->insts_begin();
 #else
   mh->_from_compiled_entry = code->verified_entry_point();
   OrderAccess::storestore();
--- a/src/share/vm/oops/methodOop.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/oops/methodOop.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -247,6 +247,10 @@
     return constMethod()->stackmap_data();
   }
 
+  void set_stackmap_data(typeArrayOop sd) {
+    constMethod()->set_stackmap_data(sd);
+  }
+
   // exception handler table
   typeArrayOop exception_table() const
                                    { return constMethod()->exception_table(); }
--- a/src/share/vm/oops/oop.inline.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/oops/oop.inline.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -173,7 +173,7 @@
   address base = Universe::narrow_oop_base();
   int    shift = Universe::narrow_oop_shift();
   oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
-  assert(check_obj_alignment(result), "Address not aligned");
+  assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
   return result;
 }
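The decode being asserted on above is simple enough to show standalone. This sketch uses assumed base, shift, and alignment values, whereas HotSpot takes them from Universe::narrow_oop_base() and Universe::narrow_oop_shift().

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed compressed-oop parameters; the VM derives these from the heap.
  const uintptr_t base          = 0x0000000700000000ull;
  const unsigned  shift         = 3;
  const uintptr_t min_alignment = 8;   // object alignment being asserted

  const uint32_t  narrow = 0x00001234;
  const uintptr_t oop    = base + (static_cast<uintptr_t>(narrow) << shift);

  if (oop % min_alignment != 0) {
    std::printf("address not aligned: %p\n", reinterpret_cast<void*>(oop));
    return 1;
  }
  std::printf("decoded oop: %p\n", reinterpret_cast<void*>(oop));
  return 0;
}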
 
--- a/src/share/vm/opto/library_call.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/opto/library_call.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -4761,7 +4761,7 @@
       Node* cv = generate_checkcast_arraycopy(adr_type,
                                               dest_elem_klass,
                                               src, src_offset, dest, dest_offset,
-                                              copy_length);
+                                              ConvI2X(copy_length));
       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
       checked_control = control();
       checked_i_o     = i_o();
@@ -5206,7 +5206,7 @@
   int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc);
   Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
   Node* n3 = new(C, 3) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
-  Node* check_offset = _gvn.transform(n3);
+  Node* check_offset = ConvI2X(_gvn.transform(n3));
   Node* check_value  = dest_elem_klass;
 
   Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
--- a/src/share/vm/opto/loopTransform.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/opto/loopTransform.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -2684,7 +2684,14 @@
                                                       fill_name, TypeAryPtr::get_array_body_type(t));
   call->init_req(TypeFunc::Parms+0, from);
   call->init_req(TypeFunc::Parms+1, store_value);
+#ifdef _LP64
+  len = new (C, 2) ConvI2LNode(len);
+  _igvn.register_new_node_with_optimizer(len);
+#endif
   call->init_req(TypeFunc::Parms+2, len);
+#ifdef _LP64
+  call->init_req(TypeFunc::Parms+3, C->top());
+#endif
   call->init_req( TypeFunc::Control, head->init_control());
   call->init_req( TypeFunc::I_O    , C->top() )        ;   // does no i/o
   call->init_req( TypeFunc::Memory ,  mem_phi->in(LoopNode::EntryControl) );
--- a/src/share/vm/opto/runtime.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/opto/runtime.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -646,12 +646,14 @@
 
 
 const TypeFunc* OptoRuntime::array_fill_Type() {
-  // create input type (domain)
-  const Type** fields = TypeTuple::fields(3);
-  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;
-  fields[TypeFunc::Parms+1] = TypeInt::INT;
-  fields[TypeFunc::Parms+2] = TypeInt::INT;
-  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms + 3, fields);
+  // create input type (domain): pointer, int, size_t
+  const Type** fields = TypeTuple::fields(3 LP64_ONLY( + 1));
+  int argp = TypeFunc::Parms;
+  fields[argp++] = TypePtr::NOTNULL;
+  fields[argp++] = TypeInt::INT;
+  fields[argp++] = TypeX_X;               // size in whatevers (size_t)
+  LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
+  const TypeTuple *domain = TypeTuple::make(argp, fields);
 
   // create result type
   fields = TypeTuple::fields(1);
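The new domain above describes a fill stub taking (pointer, int value, size_t count), with the extra Type::HALF slot being C2's convention for the second half of a 64-bit argument on LP64. A plain C++ sketch of the call shape the widened length feeds follows; the names are illustrative, not the stub's.

#include <cstddef>
#include <cstdio>

// Illustrative fill routine with the signature shape described above:
// pointer, int value, element count as a machine word (size_t).
static void int_fill(int* to, int value, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    to[i] = value;
  }
}

int main() {
  int buf[8] = {0};
  const int store_value = 7;
  const int copy_length = 8;                        // the loop's int length
  int_fill(buf, store_value,
           static_cast<size_t>(copy_length));       // widened, as ConvI2L does on LP64
  std::printf("%d ... %d\n", buf[0], buf[7]);
  return 0;
}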
--- a/src/share/vm/prims/jvmtiImpl.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/prims/jvmtiImpl.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -25,26 +25,6 @@
 # include "incls/_precompiled.incl"
 # include "incls/_jvmtiImpl.cpp.incl"
 
-GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
-
-void JvmtiPendingMonitors::transition_raw_monitors() {
-  assert((Threads::number_of_threads()==1),
-         "Java thread has not created yet or more than one java thread \
-is running. Raw monitor transition will not work");
-  JavaThread *current_java_thread = JavaThread::current();
-  assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
-  {
-    ThreadBlockInVM __tbivm(current_java_thread);
-    for(int i=0; i< count(); i++) {
-      JvmtiRawMonitor *rmonitor = monitors()->at(i);
-      int r = rmonitor->raw_enter(current_java_thread);
-      assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
-    }
-  }
-  // pending monitors are converted to real monitor so delete them all.
-  dispose();
-}
-
 //
 // class JvmtiAgentThread
 //
@@ -216,57 +196,6 @@
   }
 }
 
-
-//
-// class JvmtiRawMonitor
-//
-
-JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
-#ifdef ASSERT
-  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
-#else
-  _name = NULL;
-#endif
-  _magic = JVMTI_RM_MAGIC;
-}
-
-JvmtiRawMonitor::~JvmtiRawMonitor() {
-#ifdef ASSERT
-  FreeHeap(_name);
-#endif
-  _magic = 0;
-}
-
-
-bool
-JvmtiRawMonitor::is_valid() {
-  int value = 0;
-
-  // This object might not be a JvmtiRawMonitor so we can't assume
-  // the _magic field is properly aligned. Get the value in a safe
-  // way and then check against JVMTI_RM_MAGIC.
-
-  switch (sizeof(_magic)) {
-  case 2:
-    value = Bytes::get_native_u2((address)&_magic);
-    break;
-
-  case 4:
-    value = Bytes::get_native_u4((address)&_magic);
-    break;
-
-  case 8:
-    value = Bytes::get_native_u8((address)&_magic);
-    break;
-
-  default:
-    guarantee(false, "_magic field is an unexpected size");
-  }
-
-  return value == JVMTI_RM_MAGIC;
-}
-
-
 //
 // class JvmtiBreakpoint
 //
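JvmtiPendingMonitors and JvmtiRawMonitor are removed here and reappear in the new jvmtiRawMonitor.cpp added below. A toy, self-contained sketch of the pending-monitor pattern (cache enters requested before the VM is fully up, then replay them) follows; the class and names are stand-ins, not HotSpot's.

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Toy stand-in for the cache-then-transition idea: monitor enters requested
// too early are remembered by name and "re-entered" once real monitors exist.
class PendingMonitors {
  static std::vector<std::string>& monitors() {
    static std::vector<std::string> m;
    return m;
  }
 public:
  static void enter(const std::string& name) { monitors().push_back(name); }

  // Returns false if the monitor was never cached.
  static bool exit(const std::string& name) {
    auto& m = monitors();
    auto it = std::find(m.begin(), m.end(), name);
    if (it == m.end()) return false;
    m.erase(it);
    return true;
  }

  static void transition() {
    for (const auto& name : monitors()) {
      std::printf("re-entering raw monitor %s\n", name.c_str());
    }
    monitors().clear();   // pending entries have become real ones
  }
};

int main() {
  PendingMonitors::enter("agent-lock");
  PendingMonitors::enter("event-lock");
  PendingMonitors::exit("event-lock");
  PendingMonitors::transition();   // prints only agent-lock
  return 0;
}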
--- a/src/share/vm/prims/jvmtiImpl.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/prims/jvmtiImpl.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -26,7 +26,6 @@
 // Forward Declarations
 //
 
-class JvmtiRawMonitor;
 class JvmtiBreakpoint;
 class JvmtiBreakpoints;
 
@@ -327,76 +326,6 @@
     return false;
 }
 
-
-///////////////////////////////////////////////////////////////
-//
-// class JvmtiRawMonitor
-//
-// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
-//
-// Wrapper for ObjectMonitor class that saves the Monitor's name
-//
-
-class JvmtiRawMonitor : public ObjectMonitor  {
-private:
-  int           _magic;
-  char *        _name;
-  // JVMTI_RM_MAGIC is set in contructor and unset in destructor.
-  enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
-
-public:
-  JvmtiRawMonitor(const char *name);
-  ~JvmtiRawMonitor();
-  int            magic()   { return _magic;  }
-  const char *get_name()   { return _name; }
-  bool        is_valid();
-};
-
-// Onload pending raw monitors
-// Class is used to cache onload or onstart monitor enter
-// which will transition into real monitor when
-// VM is fully initialized.
-class JvmtiPendingMonitors : public AllStatic {
-
-private:
-  static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
-
-  inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
-
-  static void dispose() {
-    delete monitors();
-  }
-
-public:
-  static void enter(JvmtiRawMonitor *monitor) {
-    monitors()->append(monitor);
-  }
-
-  static int count() {
-    return monitors()->length();
-  }
-
-  static void destroy(JvmtiRawMonitor *monitor) {
-    while (monitors()->contains(monitor)) {
-      monitors()->remove(monitor);
-    }
-  }
-
-  // Return false if monitor is not found in the list.
-  static bool exit(JvmtiRawMonitor *monitor) {
-    if (monitors()->contains(monitor)) {
-      monitors()->remove(monitor);
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  static void transition_raw_monitors();
-};
-
-
-
 ///////////////////////////////////////////////////////////////
 // The get/set local operations must only be done by the VM thread
 // because the interpreter version needs to access oop maps, which can
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/prims/jvmtiRawMonitor.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_jvmtiRawMonitor.cpp.incl"
+
+GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
+
+void JvmtiPendingMonitors::transition_raw_monitors() {
+  assert((Threads::number_of_threads()==1),
+         "Java thread has not created yet or more than one java thread \
+is running. Raw monitor transition will not work");
+  JavaThread *current_java_thread = JavaThread::current();
+  assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
+  {
+    ThreadBlockInVM __tbivm(current_java_thread);
+    for(int i=0; i< count(); i++) {
+      JvmtiRawMonitor *rmonitor = monitors()->at(i);
+      int r = rmonitor->raw_enter(current_java_thread);
+      assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
+    }
+  }
+  // pending monitors are converted to real monitors, so delete them all.
+  dispose();
+}
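The pending-monitor path above is only reachable from JVMTI agents that take a raw monitor before the VM is fully booted. A minimal agent sketch is below; it uses only the standard JVMTI raw-monitor entry points (CreateRawMonitor, RawMonitorEnter, RawMonitorExit). The claim that an OnLoad-time enter is cached in JvmtiPendingMonitors rather than blocking is an assumption drawn from the surrounding code, not something this hunk shows directly, and the agent itself is illustrative, not part of this changeset.

// Illustrative JVMTI agent sketch (not part of this changeset).
// Error handling is omitted for brevity.
#include <jvmti.h>

static jrawMonitorID agent_lock = NULL;

JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM *vm, char *options, void *reserved) {
  jvmtiEnv *jvmti = NULL;
  if (vm->GetEnv((void **)&jvmti, JVMTI_VERSION_1_0) != JNI_OK) {
    return JNI_ERR;
  }
  // Raw monitors may be created and entered during OnLoad, before any Java
  // thread exists; such enters are presumably what JvmtiPendingMonitors
  // caches until transition_raw_monitors() runs during VM startup.
  jvmti->CreateRawMonitor("agent-lock", &agent_lock);
  jvmti->RawMonitorEnter(agent_lock);
  // ... agent initialization protected by the raw monitor ...
  jvmti->RawMonitorExit(agent_lock);
  return JNI_OK;
}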
+
+//
+// class JvmtiRawMonitor
+//
+
+JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
+#ifdef ASSERT
+  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
+#else
+  _name = NULL;
+#endif
+  _magic = JVMTI_RM_MAGIC;
+}
+
+JvmtiRawMonitor::~JvmtiRawMonitor() {
+#ifdef ASSERT
+  FreeHeap(_name);
+#endif
+  _magic = 0;
+}
+
+
+bool
+JvmtiRawMonitor::is_valid() {
+  int value = 0;
+
+  // This object might not be a JvmtiRawMonitor so we can't assume
+  // the _magic field is properly aligned. Get the value in a safe
+  // way and then check against JVMTI_RM_MAGIC.
+
+  switch (sizeof(_magic)) {
+  case 2:
+    value = Bytes::get_native_u2((address)&_magic);
+    break;
+
+  case 4:
+    value = Bytes::get_native_u4((address)&_magic);
+    break;
+
+  case 8:
+    value = Bytes::get_native_u8((address)&_magic);
+    break;
+
+  default:
+    guarantee(false, "_magic field is an unexpected size");
+  }
+
+  return value == JVMTI_RM_MAGIC;
+}
+
+// -------------------------------------------------------------------------
+// The raw monitor subsystem is entirely distinct from normal
+// java-synchronization or jni-synchronization.  raw monitors are not
+// associated with objects.  They can be implemented in any manner
+// that makes sense.  The original implementors decided to piggy-back
+// the raw-monitor implementation on the existing Java objectMonitor mechanism.
+// This flaw needs to be fixed.  We should reimplement raw monitors as sui generis.
+// Specifically, we should not implement raw monitors via java monitors.
+// Time permitting, we should disentangle and deconvolve the two implementations
+// and move the resulting raw monitor implementation over to the JVMTI directories.
+// Ideally, the raw monitor implementation would be built on top of
+// park-unpark and nothing else.
+//
+// raw monitors are used mainly by JVMTI
+// The raw monitor implementation borrows the ObjectMonitor structure,
+// but the operators are degenerate and extremely simple.
+//
+// Mixed use of a single objectMonitor instance -- as both a raw monitor
+// and a normal java monitor -- is not permissible.
+//
+// Note that we use the single RawMonitor_lock to protect queue operations for
+// _all_ raw monitors.  This is a scalability impediment, but since raw monitor usage
+// is deprecated and rare, this is not of concern.  The RawMonitor_lock can not
+// be held indefinitely.  The critical sections must be short and bounded.
+//
+// -------------------------------------------------------------------------
+
+int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
+  for (;;) {
+    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+       return OS_OK ;
+    }
+
+    ObjectWaiter Node (Self) ;
+    Self->_ParkEvent->reset() ;     // strictly optional
+    Node.TState = ObjectWaiter::TS_ENTER ;
+
+    RawMonitor_lock->lock_without_safepoint_check() ;
+    Node._next  = _EntryList ;
+    _EntryList  = &Node ;
+    OrderAccess::fence() ;
+    if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+        _EntryList = Node._next ;
+        RawMonitor_lock->unlock() ;
+        return OS_OK ;
+    }
+    RawMonitor_lock->unlock() ;
+    while (Node.TState == ObjectWaiter::TS_ENTER) {
+       Self->_ParkEvent->park() ;
+    }
+  }
+}
+
+int JvmtiRawMonitor::SimpleExit (Thread * Self) {
+  guarantee (_owner == Self, "invariant") ;
+  OrderAccess::release_store_ptr (&_owner, NULL) ;
+  OrderAccess::fence() ;
+  if (_EntryList == NULL) return OS_OK ;
+  ObjectWaiter * w ;
+
+  RawMonitor_lock->lock_without_safepoint_check() ;
+  w = _EntryList ;
+  if (w != NULL) {
+      _EntryList = w->_next ;
+  }
+  RawMonitor_lock->unlock() ;
+  if (w != NULL) {
+      guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+      ParkEvent * ev = w->_event ;
+      w->TState = ObjectWaiter::TS_RUN ;
+      OrderAccess::fence() ;
+      ev->unpark() ;
+  }
+  return OS_OK ;
+}
+
+int JvmtiRawMonitor::SimpleWait (Thread * Self, jlong millis) {
+  guarantee (_owner == Self  , "invariant") ;
+  guarantee (_recursions == 0, "invariant") ;
+
+  ObjectWaiter Node (Self) ;
+  Node._notified = 0 ;
+  Node.TState    = ObjectWaiter::TS_WAIT ;
+
+  RawMonitor_lock->lock_without_safepoint_check() ;
+  Node._next     = _WaitSet ;
+  _WaitSet       = &Node ;
+  RawMonitor_lock->unlock() ;
+
+  SimpleExit (Self) ;
+  guarantee (_owner != Self, "invariant") ;
+
+  int ret = OS_OK ;
+  if (millis <= 0) {
+    Self->_ParkEvent->park();
+  } else {
+    ret = Self->_ParkEvent->park(millis);
+  }
+
+  // If thread still resides on the waitset then unlink it.
+  // Double-checked locking -- the usage is safe in this context
+  // as TState is volatile and the lock-unlock operators are
+  // serializing (barrier-equivalent).
+
+  if (Node.TState == ObjectWaiter::TS_WAIT) {
+    RawMonitor_lock->lock_without_safepoint_check() ;
+    if (Node.TState == ObjectWaiter::TS_WAIT) {
+      // Simple O(n) unlink, but performance isn't critical here.
+      ObjectWaiter * p ;
+      ObjectWaiter * q = NULL ;
+      for (p = _WaitSet ; p != &Node; p = p->_next) {
+         q = p ;
+      }
+      guarantee (p == &Node, "invariant") ;
+      if (q == NULL) {
+        guarantee (p == _WaitSet, "invariant") ;
+        _WaitSet = p->_next ;
+      } else {
+        guarantee (p == q->_next, "invariant") ;
+        q->_next = p->_next ;
+      }
+      Node.TState = ObjectWaiter::TS_RUN ;
+    }
+    RawMonitor_lock->unlock() ;
+  }
+
+  guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
+  SimpleEnter (Self) ;
+
+  guarantee (_owner == Self, "invariant") ;
+  guarantee (_recursions == 0, "invariant") ;
+  return ret ;
+}
+
+int JvmtiRawMonitor::SimpleNotify (Thread * Self, bool All) {
+  guarantee (_owner == Self, "invariant") ;
+  if (_WaitSet == NULL) return OS_OK ;
+
+  // We have two options:
+  // A. Transfer the threads from the WaitSet to the EntryList
+  // B. Remove the thread from the WaitSet and unpark() it.
+  //
+  // We use (B), which is crude and results in lots of futile
+  // context switching.  In particular (B) induces lots of contention.
+
+  ParkEvent * ev = NULL ;       // consider using a small auto array ...
+  RawMonitor_lock->lock_without_safepoint_check() ;
+  for (;;) {
+      ObjectWaiter * w = _WaitSet ;
+      if (w == NULL) break ;
+      _WaitSet = w->_next ;
+      if (ev != NULL) { ev->unpark(); ev = NULL; }
+      ev = w->_event ;
+      OrderAccess::loadstore() ;
+      w->TState = ObjectWaiter::TS_RUN ;
+      OrderAccess::storeload();
+      if (!All) break ;
+  }
+  RawMonitor_lock->unlock() ;
+  if (ev != NULL) ev->unpark();
+  return OS_OK ;
+}
+
+// Any JavaThread will enter here with state _thread_blocked
+int JvmtiRawMonitor::raw_enter(TRAPS) {
+  TEVENT (raw_enter) ;
+  void * Contended ;
+
+  // don't enter raw monitor if thread is being externally suspended, it will
+  // surprise the suspender if a "suspended" thread can still enter monitor
+  JavaThread * jt = (JavaThread *)THREAD;
+  if (THREAD->is_Java_thread()) {
+    jt->SR_lock()->lock_without_safepoint_check();
+    while (jt->is_external_suspend()) {
+      jt->SR_lock()->unlock();
+      jt->java_suspend_self();
+      jt->SR_lock()->lock_without_safepoint_check();
+    }
+    // guarded by SR_lock to avoid racing with new external suspend requests.
+    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+    jt->SR_lock()->unlock();
+  } else {
+    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+  }
+
+  if (Contended == THREAD) {
+     _recursions ++ ;
+     return OM_OK ;
+  }
+
+  if (Contended == NULL) {
+     guarantee (_owner == THREAD, "invariant") ;
+     guarantee (_recursions == 0, "invariant") ;
+     return OM_OK ;
+  }
+
+  THREAD->set_current_pending_monitor(this);
+
+  if (!THREAD->is_Java_thread()) {
+     // No non-Java thread other than the VM thread would acquire
+     // a raw monitor.
+     assert(THREAD->is_VM_thread(), "must be VM thread");
+     SimpleEnter (THREAD) ;
+   } else {
+     guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
+     for (;;) {
+       jt->set_suspend_equivalent();
+       // cleared by handle_special_suspend_equivalent_condition() or
+       // java_suspend_self()
+       SimpleEnter (THREAD) ;
+
+       // were we externally suspended while we were waiting?
+       if (!jt->handle_special_suspend_equivalent_condition()) break ;
+
+       // This thread was externally suspended
+       //
+       // This logic isn't needed for JVMTI raw monitors,
+       // but doesn't hurt just in case the suspend rules change. This
+       // logic is needed for the JvmtiRawMonitor.wait() reentry phase.
+       // We have reentered the contended monitor, but while we were
+       // waiting another thread suspended us. We don't want to reenter
+       // the monitor while suspended because that would surprise the
+       // thread that suspended us.
+       //
+       // Drop the lock -
+       SimpleExit (THREAD) ;
+
+       jt->java_suspend_self();
+     }
+
+     assert(_owner == THREAD, "Fatal error with monitor owner!");
+     assert(_recursions == 0, "Fatal error with monitor recursions!");
+  }
+
+  THREAD->set_current_pending_monitor(NULL);
+  guarantee (_recursions == 0, "invariant") ;
+  return OM_OK;
+}
+
+// Used mainly for JVMTI raw monitor implementation
+// Also used for JvmtiRawMonitor::wait().
+int JvmtiRawMonitor::raw_exit(TRAPS) {
+  TEVENT (raw_exit) ;
+  if (THREAD != _owner) {
+    return OM_ILLEGAL_MONITOR_STATE;
+  }
+  if (_recursions > 0) {
+    --_recursions ;
+    return OM_OK ;
+  }
+
+  void * List = _EntryList ;
+  SimpleExit (THREAD) ;
+
+  return OM_OK;
+}
+
+// Used for JVMTI raw monitor implementation.
+// All JavaThreads will enter here with state _thread_blocked
+
+int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
+  TEVENT (raw_wait) ;
+  if (THREAD != _owner) {
+    return OM_ILLEGAL_MONITOR_STATE;
+  }
+
+  // To avoid spurious wakeups we reset the parkevent -- This is strictly optional.
+  // The caller must be able to tolerate spurious returns from raw_wait().
+  THREAD->_ParkEvent->reset() ;
+  OrderAccess::fence() ;
+
+  // check interrupt event
+  if (interruptible && Thread::is_interrupted(THREAD, true)) {
+    return OM_INTERRUPTED;
+  }
+
+  intptr_t save = _recursions ;
+  _recursions = 0 ;
+  _waiters ++ ;
+  if (THREAD->is_Java_thread()) {
+    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
+    ((JavaThread *)THREAD)->set_suspend_equivalent();
+  }
+  int rv = SimpleWait (THREAD, millis) ;
+  _recursions = save ;
+  _waiters -- ;
+
+  guarantee (THREAD == _owner, "invariant") ;
+  if (THREAD->is_Java_thread()) {
+     JavaThread * jSelf = (JavaThread *) THREAD ;
+     for (;;) {
+        if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
+        SimpleExit (THREAD) ;
+        jSelf->java_suspend_self();
+        SimpleEnter (THREAD) ;
+        jSelf->set_suspend_equivalent() ;
+     }
+  }
+  guarantee (THREAD == _owner, "invariant") ;
+
+  if (interruptible && Thread::is_interrupted(THREAD, true)) {
+    return OM_INTERRUPTED;
+  }
+  return OM_OK ;
+}
+
+int JvmtiRawMonitor::raw_notify(TRAPS) {
+  TEVENT (raw_notify) ;
+  if (THREAD != _owner) {
+    return OM_ILLEGAL_MONITOR_STATE;
+  }
+  SimpleNotify (THREAD, false) ;
+  return OM_OK;
+}
+
+int JvmtiRawMonitor::raw_notifyAll(TRAPS) {
+  TEVENT (raw_notifyAll) ;
+  if (THREAD != _owner) {
+    return OM_ILLEGAL_MONITOR_STATE;
+  }
+  SimpleNotify (THREAD, true) ;
+  return OM_OK;
+}
+
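For completeness, the raw_wait/raw_notify pair above is what ultimately backs the JVMTI RawMonitorWait/RawMonitorNotify functions an agent calls at runtime. A hedged usage sketch follows; the wait_for_work/post_work names are invented for the example, and the wiring from the JVMTI entry points down to these methods is assumed rather than shown in this hunk.

// Illustrative JVMTI usage sketch (not part of this changeset); 'jvmti' and
// 'agent_lock' are assumed to have been set up as in the OnLoad example above.
#include <jvmti.h>

static void wait_for_work(jvmtiEnv *jvmti, jrawMonitorID agent_lock) {
  jvmti->RawMonitorEnter(agent_lock);
  // RawMonitorWait may return spuriously or on interrupt, so a real caller
  // would re-check its own condition in a loop.
  jvmti->RawMonitorWait(agent_lock, 0 /* wait indefinitely */);
  jvmti->RawMonitorExit(agent_lock);
}

static void post_work(jvmtiEnv *jvmti, jrawMonitorID agent_lock) {
  jvmti->RawMonitorEnter(agent_lock);
  jvmti->RawMonitorNotifyAll(agent_lock);
  jvmti->RawMonitorExit(agent_lock);
}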
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/prims/jvmtiRawMonitor.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+//
+// class JvmtiRawMonitor
+//
+// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, RawMonitorEnter, etc.)
+//
+// Wrapper for ObjectMonitor class that saves the Monitor's name
+//
+
+class JvmtiRawMonitor : public ObjectMonitor  {
+private:
+  int           _magic;
+  char *        _name;
+  // JVMTI_RM_MAGIC is set in the constructor and unset in the destructor.
+  enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
+
+  int       SimpleEnter (Thread * Self) ;
+  int       SimpleExit  (Thread * Self) ;
+  int       SimpleWait  (Thread * Self, jlong millis) ;
+  int       SimpleNotify (Thread * Self, bool All) ;
+
+public:
+  JvmtiRawMonitor(const char *name);
+  ~JvmtiRawMonitor();
+  int       raw_enter(TRAPS);
+  int       raw_exit(TRAPS);
+  int       raw_wait(jlong millis, bool interruptible, TRAPS);
+  int       raw_notify(TRAPS);
+  int       raw_notifyAll(TRAPS);
+  int            magic()   { return _magic;  }
+  const char *get_name()   { return _name; }
+  bool        is_valid();
+};
+
+// Onload pending raw monitors
+// This class caches raw monitor enters performed during OnLoad or OnStart,
+// which are transitioned into real monitor enters once the
+// VM is fully initialized.
+class JvmtiPendingMonitors : public AllStatic {
+
+private:
+  static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
+
+  inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
+
+  static void dispose() {
+    delete monitors();
+  }
+
+public:
+  static void enter(JvmtiRawMonitor *monitor) {
+    monitors()->append(monitor);
+  }
+
+  static int count() {
+    return monitors()->length();
+  }
+
+  static void destroy(JvmtiRawMonitor *monitor) {
+    while (monitors()->contains(monitor)) {
+      monitors()->remove(monitor);
+    }
+  }
+
+  // Return false if monitor is not found in the list.
+  static bool exit(JvmtiRawMonitor *monitor) {
+    if (monitors()->contains(monitor)) {
+      monitors()->remove(monitor);
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  static void transition_raw_monitors();
+};
--- a/src/share/vm/prims/methodHandles.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/prims/methodHandles.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1568,7 +1568,7 @@
     if (ptype != T_INT) {
       int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
       jint value = argument->int_field(value_offset);
-      int vminfo = adapter_subword_vminfo(ptype);
+      int vminfo = adapter_unbox_subword_vminfo(ptype);
       jint subword = truncate_subword_from_vminfo(value, vminfo);
       if (value != subword) {
         err = "bound subword value does not fit into the subword type";
@@ -2018,12 +2018,12 @@
         assert(src == T_INT || is_subword_type(src), "source is not float");
         // Subword-related cases are int -> {boolean,byte,char,short}.
         ek_opt = _adapter_opt_i2i;
-        vminfo = adapter_subword_vminfo(dest);
+        vminfo = adapter_prim_to_prim_subword_vminfo(dest);
         break;
       case 2 *4+ 1:
         if (src == T_LONG && (dest == T_INT || is_subword_type(dest))) {
           ek_opt = _adapter_opt_l2i;
-          vminfo = adapter_subword_vminfo(dest);
+          vminfo = adapter_prim_to_prim_subword_vminfo(dest);
         } else if (src == T_DOUBLE && dest == T_FLOAT) {
           ek_opt = _adapter_opt_d2f;
         } else {
@@ -2051,7 +2051,7 @@
       switch (type2size[dest]) {
       case 1:
         ek_opt = _adapter_opt_unboxi;
-        vminfo = adapter_subword_vminfo(dest);
+        vminfo = adapter_unbox_subword_vminfo(dest);
         break;
       case 2:
         ek_opt = _adapter_opt_unboxl;
--- a/src/share/vm/prims/methodHandles.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/prims/methodHandles.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -226,11 +226,20 @@
   }
 
   enum { CONV_VMINFO_SIGN_FLAG = 0x80 };
-  static int adapter_subword_vminfo(BasicType dest) {
-    if (dest == T_BOOLEAN) return (BitsPerInt -  1);
-    if (dest == T_CHAR)    return (BitsPerInt - 16);
-    if (dest == T_BYTE)    return (BitsPerInt -  8) | CONV_VMINFO_SIGN_FLAG;
-    if (dest == T_SHORT)   return (BitsPerInt - 16) | CONV_VMINFO_SIGN_FLAG;
+  // Shift values for prim-to-prim conversions.
+  static int adapter_prim_to_prim_subword_vminfo(BasicType dest) {
+    if (dest == T_BOOLEAN) return (BitsPerInt - 1);  // boolean is 1 bit
+    if (dest == T_CHAR)    return (BitsPerInt - BitsPerShort);
+    if (dest == T_BYTE)    return (BitsPerInt - BitsPerByte ) | CONV_VMINFO_SIGN_FLAG;
+    if (dest == T_SHORT)   return (BitsPerInt - BitsPerShort) | CONV_VMINFO_SIGN_FLAG;
+    return 0;                   // case T_INT
+  }
+  // Shift values for unboxing a primitive.
+  static int adapter_unbox_subword_vminfo(BasicType dest) {
+    if (dest == T_BOOLEAN) return (BitsPerInt - BitsPerByte );  // implemented as 1 byte
+    if (dest == T_CHAR)    return (BitsPerInt - BitsPerShort);
+    if (dest == T_BYTE)    return (BitsPerInt - BitsPerByte ) | CONV_VMINFO_SIGN_FLAG;
+    if (dest == T_SHORT)   return (BitsPerInt - BitsPerShort) | CONV_VMINFO_SIGN_FLAG;
     return 0;                   // case T_INT
   }
   // Here is the transformation the i2i adapter must perform:
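To make the vminfo encoding above concrete, here is a small stand-alone sketch of how a subword shift count combined with CONV_VMINFO_SIGN_FLAG can be applied. The helper name and the exact masking are assumptions for illustration; the real truncate_subword_from_vminfo in methodHandles may differ in detail.

// Stand-alone illustration of the subword-shift encoding (not HotSpot code).
#include <cstdint>
#include <cassert>

static const int kBitsPerInt         = 32;
static const int kBitsPerByte        = 8;
static const int kConvVminfoSignFlag = 0x80;   // mirrors CONV_VMINFO_SIGN_FLAG

// Hypothetical helper: truncate 'value' to the subword described by 'vminfo'.
static int32_t truncate_subword(int32_t value, int vminfo) {
  int shift = vminfo & ~kConvVminfoSignFlag;        // strip the sign flag
  uint32_t tem = (uint32_t)value << shift;          // drop the high-order bits
  if (vminfo & kConvVminfoSignFlag) {
    return (int32_t)tem >> shift;                   // sign-extend (byte, short)
  } else {
    return (int32_t)(tem >> shift);                 // zero-extend (boolean, char)
  }
}

int main() {
  // T_BYTE: vminfo = (BitsPerInt - BitsPerByte) | CONV_VMINFO_SIGN_FLAG
  int byte_vminfo = (kBitsPerInt - kBitsPerByte) | kConvVminfoSignFlag;
  assert(truncate_subword(0x7F,  byte_vminfo) == 127);
  assert(truncate_subword(0xFF,  byte_vminfo) == -1);  // 255 does not fit a byte,
  assert(truncate_subword(0x1FF, byte_vminfo) == -1);  // so a "does not fit" check would fire
  return 0;
}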
--- a/src/share/vm/runtime/arguments.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -119,11 +119,8 @@
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.version", "1.0", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.name",
                                                                  "Java Virtual Machine Specification",  false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
-        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.version", VM_Version::vm_release(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(),  false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(),  true));
 
   // following are JVMTI agent writeable properties.
@@ -151,6 +148,14 @@
   os::init_system_properties_values();
 }
 
+
+// Update/Initialize System properties after JDK version number is known
+void Arguments::init_version_specific_system_properties() {
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
+        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(),  false));
+}
+
 /**
  * Provide a slightly more user-friendly way of eliminating -XX flags.
  * When a flag is eliminated, it can be added to this list in order to
@@ -1680,7 +1685,8 @@
   bool status = true;
   status = status && verify_min_value(StackYellowPages, 1, "StackYellowPages");
   status = status && verify_min_value(StackRedPages, 1, "StackRedPages");
-  status = status && verify_min_value(StackShadowPages, 1, "StackShadowPages");
+  // if StackShadowPages is too large, the VM cannot generate the instruction to bang the stack
+  status = status && verify_interval(StackShadowPages, 1, 50, "StackShadowPages");
   return status;
 }
 
@@ -2975,6 +2981,13 @@
   UseCompressedOops = false;
 #endif
 
+#if defined(_LP64)
+  if ((DumpSharedSpaces || RequireSharedSpaces) && UseCompressedOops) {
+    // Disable compressed oops with shared spaces
+    UseCompressedOops = false;
+  }
+#endif
+
   // Set object alignment values.
   set_object_alignment();
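In practice the new _LP64 guard means a 64-bit archive dump, or a run that requires the shared archive, no longer uses compressed oops. For example, a dump started with

  java -Xshare:dump -XX:+UseCompressedOops -version

would now proceed with UseCompressedOops implicitly cleared (assuming -Xshare:dump still maps onto DumpSharedSpaces, as it does elsewhere in arguments.cpp; that mapping is not shown in this hunk).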
 
--- a/src/share/vm/runtime/arguments.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/arguments.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -484,6 +484,9 @@
   // System properties
   static void init_system_properties();
 
+  // Update/Initialize System properties after JDK version number is known
+  static void init_version_specific_system_properties();
+
   // Property List manipulation
   static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
   static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/basicLock.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_basicLock.cpp.incl"
+
+void BasicLock::print_on(outputStream* st) const {
+  st->print("monitor");
+}
+
+void BasicLock::move_to(oop obj, BasicLock* dest) {
+  // Check to see if we need to inflate the lock. This is only needed
+  // if an object is locked using "this" lightweight monitor. In that
+  // case, the displaced_header() is unlocked, because the
+  // displaced_header() contains the header for the originally unlocked
+  // object. However the object could have already been inflated. But it
+  // does not matter, the inflation will just be a no-op. For other cases,
+  // the displaced header will be either 0x0 or 0x3, which are location
+  // independent, therefore the BasicLock is free to move.
+  //
+  // During OSR we may need to relocate a BasicLock (which contains a
+  // displaced word) from a location in an interpreter frame to a
+  // new location in a compiled frame.  "this" refers to the source
+  // basiclock in the interpreter frame.  "dest" refers to the destination
+  // basiclock in the new compiled frame.  We *always* inflate in move_to().
+  // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
+  // cause performance problems in code that makes heavy use of a small # of
+  // uncontended locks.   (We'd inflate during OSR, and then sync performance
+  // would subsequently plummet because the thread would be forced thru the slow-path).
+  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
+  // operations in Fast_Lock and Fast_Unlock in i486.ad.
+  //
+  // Note that there is a way to safely swing the object's markword from
+  // one stack location to another.  This avoids inflation.  Obviously,
+  // we need to ensure that both locations refer to the current thread's stack.
+  // There are some subtle concurrency issues, however, and since the benefit
+  // is small (given the support for inflated fast-path locking in the fast_lock, etc)
+  // we'll leave that optimization for another time.
+
+  if (displaced_header()->is_neutral()) {
+    ObjectSynchronizer::inflate_helper(obj);
+    // WARNING: We cannot put a check here, because the inflation
+    // will not update the displaced header. Once BasicLock is inflated,
+    // no one should ever look at its content.
+  } else {
+    // Typically the displaced header will be 0 (recursive stack lock) or
+    // unused_mark.  Naively we'd like to assert that the displaced mark
+    // value is either 0, neutral, or 3.  But with the advent of the
+    // store-before-CAS avoidance in fast_lock/compiler_lock_object
+    // we can find any flavor mark in the displaced mark.
+  }
+// [RGV] The next line appears to do nothing!
+  intptr_t dh = (intptr_t) displaced_header();
+  dest->set_displaced_header(displaced_header());
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/basicLock.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class BasicLock VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  volatile markOop _displaced_header;
+ public:
+  markOop      displaced_header() const               { return _displaced_header; }
+  void         set_displaced_header(markOop header)   { _displaced_header = header; }
+
+  void print_on(outputStream* st) const;
+
+  // move a basic lock (used during deoptimization)
+  void move_to(oop obj, BasicLock* dest);
+
+  static int displaced_header_offset_in_bytes()       { return offset_of(BasicLock, _displaced_header); }
+};
+
+// A BasicObjectLock associates a specific Java object with a BasicLock.
+// It is currently embedded in an interpreter frame.
+
+// Because some machines have alignment restrictions on the control stack,
+// the actual space allocated by the interpreter may include padding words
+// after the end of the BasicObjectLock.  Also, in order to guarantee
+// alignment of the embedded BasicLock objects on such machines, we
+// put the embedded BasicLock at the beginning of the struct.
+
+class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  BasicLock _lock;                                    // the lock, must be double word aligned
+  oop       _obj;                                     // object that holds the lock
+
+ public:
+  // Manipulation
+  oop      obj() const                                { return _obj;  }
+  void set_obj(oop obj)                               { _obj = obj; }
+  BasicLock* lock()                                   { return &_lock; }
+
+  // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
+  //       in interpreter activation frames since it includes machine-specific padding.
+  static int size()                                   { return sizeof(BasicObjectLock)/wordSize; }
+
+  // GC support
+  void oops_do(OopClosure* f) { f->do_oop(&_obj); }
+
+  static int obj_offset_in_bytes()                    { return offset_of(BasicObjectLock, _obj);  }
+  static int lock_offset_in_bytes()                   { return offset_of(BasicObjectLock, _lock); }
+};
+
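As a worked example of size(): on an LP64 build where wordSize is 8, and assuming no compiler padding between the two fields, sizeof(BasicObjectLock) is 16 bytes (one markOop-sized displaced header plus one oop), so size() reports 2 words per monitor slot. Interpreter frames must nonetheless be sized via frame::interpreter_frame_monitor_size(), since the machine-specific padding mentioned above can make the real footprint larger.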
--- a/src/share/vm/runtime/globals.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/globals.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -327,10 +327,10 @@
   /* UseMembar is theoretically a temp flag used for memory barrier         \
    * removal testing.  It was supposed to be removed before FCS but has     \
    * been re-added (see 6401008) */                                         \
-  product(bool, UseMembar, false,                                           \
+  product_pd(bool, UseMembar,                                               \
           "(Unstable) Issues membars on thread state transitions")          \
                                                                             \
-  /* Temporary: See 6948537 */                                             \
+  /* Temporary: See 6948537 */                                              \
   experimental(bool, UseMemSetInBOT, true,                                  \
           "(Unstable) uses memset in BOT updates in GC code")               \
                                                                             \
@@ -822,6 +822,9 @@
   develop(bool, PrintJVMWarnings, false,                                    \
           "Prints warnings for unimplemented JVM functions")                \
                                                                             \
+  product(bool, PrintWarnings, true,                                        \
+          "Prints JVM warnings to output stream")                           \
+                                                                            \
   notproduct(uintx, WarnOnStalledSpinLock, 0,                               \
           "Prints warnings for stalled SpinLocks")                          \
                                                                             \
@@ -3535,7 +3538,7 @@
   product(uintx, SharedDummyBlockSize, 512*M,                               \
           "Size of dummy block used to shift heap addresses (in bytes)")    \
                                                                             \
-  product(uintx, SharedReadWriteSize,  12*M,                                \
+  product(uintx, SharedReadWriteSize,  NOT_LP64(12*M) LP64_ONLY(13*M),      \
           "Size of read-write space in permanent generation (in bytes)")    \
                                                                             \
   product(uintx, SharedReadOnlySize,   10*M,                                \
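The new PrintWarnings switch is a product bool that defaults to true, so it can presumably be turned off in the usual way, e.g. -XX:-PrintWarnings, to silence the output it gates (this hunk does not show which warning path consults the flag, so that is an assumption).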
--- a/src/share/vm/runtime/mutex.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/mutex.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -265,48 +265,3 @@
    }
 };
 
-/*
- * Per-thread blocking support for JSR166. See the Java-level
- * Documentation for rationale. Basically, park acts like wait, unpark
- * like notify.
- *
- * 6271289 --
- * To avoid errors where an os thread expires but the JavaThread still
- * exists, Parkers are immortal (type-stable) and are recycled across
- * new threads.  This parallels the ParkEvent implementation.
- * Because park-unpark allow spurious wakeups it is harmless if an
- * unpark call unparks a new thread using the old Parker reference.
- *
- * In the future we'll want to think about eliminating Parker and using
- * ParkEvent instead.  There's considerable duplication between the two
- * services.
- *
- */
-
-class Parker : public os::PlatformParker {
-private:
-  volatile int _counter ;
-  Parker * FreeNext ;
-  JavaThread * AssociatedWith ; // Current association
-
-public:
-  Parker() : PlatformParker() {
-    _counter       = 0 ;
-    FreeNext       = NULL ;
-    AssociatedWith = NULL ;
-  }
-protected:
-  ~Parker() { ShouldNotReachHere(); }
-public:
-  // For simplicity of interface with Java, all forms of park (indefinite,
-  // relative, and absolute) are multiplexed into one call.
-  void park(bool isAbsolute, jlong time);
-  void unpark();
-
-  // Lifecycle operators
-  static Parker * Allocate (JavaThread * t) ;
-  static void Release (Parker * e) ;
-private:
-  static Parker * volatile FreeList ;
-  static volatile int ListLock ;
-};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/objectMonitor.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -0,0 +1,2421 @@
+/*
+ * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_objectMonitor.cpp.incl"
+
+#if defined(__GNUC__) && !defined(IA64)
+  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
+  #define ATTR __attribute__((noinline))
+#else
+  #define ATTR
+#endif
+
+
+#ifdef DTRACE_ENABLED
+
+// Only bother with this argument setup if dtrace is available
+// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
+
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
+  jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
+  jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
+  jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
+  jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
+  jlong, uintptr_t, char*, int);
+
+#define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
+  char* bytes = NULL;                                                      \
+  int len = 0;                                                             \
+  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
+  symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name();  \
+  if (klassname != NULL) {                                                 \
+    bytes = (char*)klassname->bytes();                                     \
+    len = klassname->utf8_length();                                        \
+  }
+
+#define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis)       \
+  {                                                                        \
+    if (DTraceMonitorProbes) {                                            \
+      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
+      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
+                       (monitor), bytes, len, (millis));                   \
+    }                                                                      \
+  }
+
+#define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread)             \
+  {                                                                        \
+    if (DTraceMonitorProbes) {                                            \
+      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
+      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
+                       (uintptr_t)(monitor), bytes, len);                  \
+    }                                                                      \
+  }
+
+#else //  ndef DTRACE_ENABLED
+
+#define DTRACE_MONITOR_WAIT_PROBE(klassOop, thread, millis, mon)    {;}
+#define DTRACE_MONITOR_PROBE(probe, klassOop, thread, mon)          {;}
+
+#endif // ndef DTRACE_ENABLED
+
+// Tunables ...
+// The knob* variables are effectively final.  Once set they should
+// never be modified thereafter.  Consider using __read_mostly with GCC.
+
+int ObjectMonitor::Knob_Verbose    = 0 ;
+int ObjectMonitor::Knob_SpinLimit  = 5000 ;    // derived by an external tool -
+static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
+static int Knob_HandOff            = 0 ;
+static int Knob_ReportSettings     = 0 ;
+
+static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
+static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
+static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
+static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
+static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
+static int Knob_SpinEarly          = 1 ;
+static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
+static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
+static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
+static int Knob_Bonus              = 100 ;     // spin success bonus
+static int Knob_BonusB             = 100 ;     // spin success bonus
+static int Knob_Penalty            = 200 ;     // spin failure penalty
+static int Knob_Poverty            = 1000 ;
+static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
+static int Knob_FixedSpin          = 0 ;
+static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
+static int Knob_UsePause           = 1 ;
+static int Knob_ExitPolicy         = 0 ;
+static int Knob_PreSpin            = 10 ;      // 20-100 likely better
+static int Knob_ResetEvent         = 0 ;
+static int BackOffMask             = 0 ;
+
+static int Knob_FastHSSEC          = 0 ;
+static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
+static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
+static volatile int InitDone       = 0 ;
+
+#define TrySpin TrySpin_VaryDuration
+
+// -----------------------------------------------------------------------------
+// Theory of operations -- Monitors lists, thread residency, etc:
+//
+// * A thread acquires ownership of a monitor by successfully
+//   CAS()ing the _owner field from null to non-null.
+//
+// * Invariant: A thread appears on at most one monitor list --
+//   cxq, EntryList or WaitSet -- at any one time.
+//
+// * Contending threads "push" themselves onto the cxq with CAS
+//   and then spin/park.
+//
+// * After a contending thread eventually acquires the lock it must
+//   dequeue itself from either the EntryList or the cxq.
+//
+// * The exiting thread identifies and unparks an "heir presumptive"
+//   tentative successor thread on the EntryList.  Critically, the
+//   exiting thread doesn't unlink the successor thread from the EntryList.
+//   After having been unparked, the wakee will recontend for ownership of
+//   the monitor.   The successor (wakee) will either acquire the lock or
+//   re-park itself.
+//
+//   Succession is provided for by a policy of competitive handoff.
+//   The exiting thread does _not_ grant or pass ownership to the
+//   successor thread.  (This is also referred to as "handoff" succession.)
+//   Instead the exiting thread releases ownership and possibly wakes
+//   a successor, so the successor can (re)compete for ownership of the lock.
+//   If the EntryList is empty but the cxq is populated the exiting
+//   thread will drain the cxq into the EntryList.  It does so by
+//   by detaching the cxq (installing null with CAS) and folding
+//   the threads from the cxq into the EntryList.  The EntryList is
+//   doubly linked, while the cxq is singly linked because of the
+//   CAS-based "push" used to enqueue recently arrived threads (RATs).
+//
+// * Concurrency invariants:
+//
+//   -- only the monitor owner may access or mutate the EntryList.
+//      The mutex property of the monitor itself protects the EntryList
+//      from concurrent interference.
+//   -- Only the monitor owner may detach the cxq.
+//
+// * The monitor entry list operations avoid locks, but strictly speaking
+//   they're not lock-free.  Enter is lock-free, exit is not.
+//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
+//
+// * The cxq can have multiple concurrent "pushers" but only one concurrent
+//   detaching thread.  This mechanism is immune from the ABA corruption.
+//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
+//
+// * Taken together, the cxq and the EntryList constitute or form a
+//   single logical queue of threads stalled trying to acquire the lock.
+//   We use two distinct lists to improve the odds of a constant-time
+//   dequeue operation after acquisition (in the ::enter() epilog) and
+//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
+//   A key desideratum is to minimize queue & monitor metadata manipulation
+//   that occurs while holding the monitor lock -- that is, we want to
+//   minimize monitor lock holds times.  Note that even a small amount of
+//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
+//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
+//   locks and monitor metadata.
+//
+//   Cxq points to the set of Recently Arrived Threads attempting entry.
+//   Because we push threads onto _cxq with CAS, the RATs must take the form of
+//   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
+//   the unlocking thread notices that EntryList is null but _cxq is != null.
+//
+//   The EntryList is ordered by the prevailing queue discipline and
+//   can be organized in any convenient fashion, such as a doubly-linked list or
+//   a circular doubly-linked list.  Critically, we want insert and delete operations
+//   to operate in constant-time.  If we need a priority queue then something akin
+//   to Solaris' sleepq would work nicely.  Viz.,
+//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
+//   Queue discipline is enforced at ::exit() time, when the unlocking thread
+//   drains the cxq into the EntryList, and orders or reorders the threads on the
+//   EntryList accordingly.
+//
+//   Barring "lock barging", this mechanism provides fair cyclic ordering,
+//   somewhat similar to an elevator-scan.
+//
+// * The monitor synchronization subsystem avoids the use of native
+//   synchronization primitives except for the narrow platform-specific
+//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
+//   the semantics of park-unpark.  Put another way, this monitor implementation
+//   depends only on atomic operations and park-unpark.  The monitor subsystem
+//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
+//   underlying OS manages the READY<->RUN transitions.
+//
+// * Waiting threads reside on the WaitSet list -- wait() puts
+//   the caller onto the WaitSet.
+//
+// * notify() or notifyAll() simply transfers threads from the WaitSet to
+//   either the EntryList or cxq.  Subsequent exit() operations will
+//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
+//   it's likely the notifyee would simply impale itself on the lock held
+//   by the notifier.
+//
+// * An interesting alternative is to encode cxq as (List,LockByte) where
+//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
+//   variable, like _recursions, in the scheme.  The threads or Events that form
+//   the list would have to be aligned in 256-byte addresses.  A thread would
+//   try to acquire the lock or enqueue itself with CAS, but exiting threads
+//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
+//   Note that this is *not* word-tearing, but it does presume that full-word
+//   CAS operations are coherent when intermixed with STB operations.  That's true
+//   on most common processors.
+//
+// * See also http://blogs.sun.com/dave
+
+
+// -----------------------------------------------------------------------------
+// Enter support
+
+bool ObjectMonitor::try_enter(Thread* THREAD) {
+  if (THREAD != _owner) {
+    if (THREAD->is_lock_owned ((address)_owner)) {
+       assert(_recursions == 0, "internal state error");
+       _owner = THREAD ;
+       _recursions = 1 ;
+       OwnerIsThread = 1 ;
+       return true;
+    }
+    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+      return false;
+    }
+    return true;
+  } else {
+    _recursions++;
+    return true;
+  }
+}
+
+void ATTR ObjectMonitor::enter(TRAPS) {
+  // The following code is ordered to check the most common cases first
+  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
+  Thread * const Self = THREAD ;
+  void * cur ;
+
+  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
+  if (cur == NULL) {
+     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
+     assert (_recursions == 0   , "invariant") ;
+     assert (_owner      == Self, "invariant") ;
+     // CONSIDER: set or assert OwnerIsThread == 1
+     return ;
+  }
+
+  if (cur == Self) {
+     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
+     _recursions ++ ;
+     return ;
+  }
+
+  if (Self->is_lock_owned ((address)cur)) {
+    assert (_recursions == 0, "internal state error");
+    _recursions = 1 ;
+    // Commute owner from a thread-specific on-stack BasicLockObject address to
+    // a full-fledged "Thread *".
+    _owner = Self ;
+    OwnerIsThread = 1 ;
+    return ;
+  }
+
+  // We've encountered genuine contention.
+  assert (Self->_Stalled == 0, "invariant") ;
+  Self->_Stalled = intptr_t(this) ;
+
+  // Try one round of spinning *before* enqueueing Self
+  // and before going through the awkward and expensive state
+  // transitions.  The following spin is strictly optional ...
+  // Note that if we acquire the monitor from an initial spin
+  // we forgo posting JVMTI events and firing DTRACE probes.
+  if (Knob_SpinEarly && TrySpin (Self) > 0) {
+     assert (_owner == Self      , "invariant") ;
+     assert (_recursions == 0    , "invariant") ;
+     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+     Self->_Stalled = 0 ;
+     return ;
+  }
+
+  assert (_owner != Self          , "invariant") ;
+  assert (_succ  != Self          , "invariant") ;
+  assert (Self->is_Java_thread()  , "invariant") ;
+  JavaThread * jt = (JavaThread *) Self ;
+  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
+  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
+  assert (this->object() != NULL  , "invariant") ;
+  assert (_count >= 0, "invariant") ;
+
+  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
+  // Ensure the object-monitor relationship remains stable while there's contention.
+  Atomic::inc_ptr(&_count);
+
+  { // Change java thread status to indicate blocked on monitor enter.
+    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
+
+    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
+    if (JvmtiExport::should_post_monitor_contended_enter()) {
+      JvmtiExport::post_monitor_contended_enter(jt, this);
+    }
+
+    OSThreadContendState osts(Self->osthread());
+    ThreadBlockInVM tbivm(jt);
+
+    Self->set_current_pending_monitor(this);
+
+    // TODO-FIXME: change the following for(;;) loop to straight-line code.
+    for (;;) {
+      jt->set_suspend_equivalent();
+      // cleared by handle_special_suspend_equivalent_condition()
+      // or java_suspend_self()
+
+      EnterI (THREAD) ;
+
+      if (!ExitSuspendEquivalent(jt)) break ;
+
+      //
+      // We have acquired the contended monitor, but while we were
+      // waiting another thread suspended us. We don't want to enter
+      // the monitor while suspended because that would surprise the
+      // thread that suspended us.
+      //
+      _recursions = 0 ;
+      _succ = NULL ;
+      exit (Self) ;
+
+      jt->java_suspend_self();
+    }
+    Self->set_current_pending_monitor(NULL);
+  }
+
+  Atomic::dec_ptr(&_count);
+  assert (_count >= 0, "invariant") ;
+  Self->_Stalled = 0 ;
+
+  // Must either set _recursions = 0 or ASSERT _recursions == 0.
+  assert (_recursions == 0     , "invariant") ;
+  assert (_owner == Self       , "invariant") ;
+  assert (_succ  != Self       , "invariant") ;
+  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+
+  // The thread -- now the owner -- is back in vm mode.
+  // Report the glorious news via TI,DTrace and jvmstat.
+  // The probe effect is non-trivial.  All the reportage occurs
+  // while we hold the monitor, increasing the length of the critical
+  // section.  Amdahl's parallel speedup law comes vividly into play.
+  //
+  // Another option might be to aggregate the events (thread local or
+  // per-monitor aggregation) and defer reporting until a more opportune
+  // time -- such as next time some thread encounters contention but has
+  // yet to acquire the lock.  While spinning, that thread could
+  // increment JVMStat counters, etc.
+
+  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
+  if (JvmtiExport::should_post_monitor_contended_entered()) {
+    JvmtiExport::post_monitor_contended_entered(jt, this);
+  }
+  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
+     ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
+  }
+}
+
+
+// Caveat: TryLock() is not necessarily serializing if it returns failure.
+// Callers must compensate as needed.
+
+int ObjectMonitor::TryLock (Thread * Self) {
+   for (;;) {
+      void * own = _owner ;
+      if (own != NULL) return 0 ;
+      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+         // Either guarantee _recursions == 0 or set _recursions = 0.
+         assert (_recursions == 0, "invariant") ;
+         assert (_owner == Self, "invariant") ;
+         // CONSIDER: set or assert that OwnerIsThread == 1
+         return 1 ;
+      }
+      // The lock had been free momentarily, but we lost the race to the lock.
+      // Interference -- the CAS failed.
+      // We can either return -1 or retry.
+      // Retry doesn't make as much sense because the lock was just acquired.
+      if (true) return -1 ;
+   }
+}
+
+void ATTR ObjectMonitor::EnterI (TRAPS) {
+    Thread * Self = THREAD ;
+    assert (Self->is_Java_thread(), "invariant") ;
+    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
+
+    // Try the lock - TATAS
+    if (TryLock (Self) > 0) {
+        assert (_succ != Self              , "invariant") ;
+        assert (_owner == Self             , "invariant") ;
+        assert (_Responsible != Self       , "invariant") ;
+        return ;
+    }
+
+    DeferredInitialize () ;
+
+    // We try one round of spinning *before* enqueueing Self.
+    //
+    // If the _owner is ready but OFFPROC we could use a YieldTo()
+    // operation to donate the remainder of this thread's quantum
+    // to the owner.  This has subtle but beneficial affinity
+    // effects.
+
+    if (TrySpin (Self) > 0) {
+        assert (_owner == Self        , "invariant") ;
+        assert (_succ != Self         , "invariant") ;
+        assert (_Responsible != Self  , "invariant") ;
+        return ;
+    }
+
+    // The Spin failed -- Enqueue and park the thread ...
+    assert (_succ  != Self            , "invariant") ;
+    assert (_owner != Self            , "invariant") ;
+    assert (_Responsible != Self      , "invariant") ;
+
+    // Enqueue "Self" on ObjectMonitor's _cxq.
+    //
+    // Node acts as a proxy for Self.
+    // As an aside, if we were to ever rewrite the synchronization code mostly
+    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
+    // Java objects.  This would avoid awkward lifecycle and liveness issues,
+    // as well as eliminate a subset of ABA issues.
+    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
+    //
+
+    ObjectWaiter node(Self) ;
+    Self->_ParkEvent->reset() ;
+    node._prev   = (ObjectWaiter *) 0xBAD ;
+    node.TState  = ObjectWaiter::TS_CXQ ;
+
+    // Push "Self" onto the front of the _cxq.
+    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
+    // Note that spinning tends to reduce the rate at which threads
+    // enqueue and dequeue on EntryList|cxq.
+    ObjectWaiter * nxt ;
+    for (;;) {
+        node._next = nxt = _cxq ;
+        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
+
+        // Interference - the CAS failed because _cxq changed.  Just retry.
+        // As an optional optimization we retry the lock.
+        if (TryLock (Self) > 0) {
+            assert (_succ != Self         , "invariant") ;
+            assert (_owner == Self        , "invariant") ;
+            assert (_Responsible != Self  , "invariant") ;
+            return ;
+        }
+    }
+
+    // Check for cxq|EntryList edge transition to non-null.  This indicates
+    // the onset of contention.  While contention persists exiting threads
+    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
+    // operations revert to the faster 1-0 mode.  This enter operation may interleave
+    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
+    // arrange for one of the contending threads to use a timed park() operation
+    // to detect and recover from the race.  (Stranding is a form of progress failure
+    // where the monitor is unlocked but all the contending threads remain parked).
+    // That is, at least one of the contending threads will periodically poll _owner.
+    // One of the contending threads will become the designated "Responsible" thread.
+    // The Responsible thread uses a timed park instead of a normal indefinite park
+    // operation -- it periodically wakes and checks for and recovers from potential
+    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
+    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
+    // be responsible for a monitor.
+    //
+    // Currently, one of the contending threads takes on the added role of "Responsible".
+    // A viable alternative would be to use a dedicated "stranding checker" thread
+    // that periodically iterated over all the threads (or active monitors) and unparked
+    // successors where there was risk of stranding.  This would help eliminate the
+    // timer scalability issues we see on some platforms as we'd only have one thread
+    // -- the checker -- parked on a timer.
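+    //
+    // An illustrative stranding interleaving (1-0 exit racing this slow-path enter):
+    //    Exiting thread:   ST _owner = NULL     -- no MEMBAR, no recheck of cxq|EntryList
+    //    Entering thread:  CAS push onto _cxq ; LD _owner -- observes the stale pre-exit value
+    //                      park()
+    // The monitor is now unlocked yet the entering thread is parked.  The timed
+    // park() taken by the Responsible thread bounds how long such a stranding
+    // can persist.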
+
+    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
+        // Try to assume the role of responsible thread for the monitor.
+        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
+        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
+    }
+
+    // The lock might have been released while this thread was occupied queueing
+    // itself onto _cxq.  To close the race and avoid "stranding" and
+    // progress-liveness failure we must resample-retry _owner before parking.
+    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
+    // In this case the ST-MEMBAR is accomplished with CAS().
+    //
+    // TODO: Defer all thread state transitions until park-time.
+    // Since state transitions are heavy and inefficient we'd like
+    // to defer the state transitions until absolutely necessary,
+    // and in doing so avoid some transitions ...
+
+    TEVENT (Inflated enter - Contention) ;
+    int nWakeups = 0 ;
+    int RecheckInterval = 1 ;
+
+    for (;;) {
+
+        if (TryLock (Self) > 0) break ;
+        assert (_owner != Self, "invariant") ;
+
+        if ((SyncFlags & 2) && _Responsible == NULL) {
+           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
+        }
+
+        // park self
+        if (_Responsible == Self || (SyncFlags & 1)) {
+            TEVENT (Inflated enter - park TIMED) ;
+            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
+            // Increase the RecheckInterval, but clamp the value.
+            RecheckInterval *= 8 ;
+            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
+        } else {
+            TEVENT (Inflated enter - park UNTIMED) ;
+            Self->_ParkEvent->park() ;
+        }
+
+        if (TryLock(Self) > 0) break ;
+
+        // The lock is still contested.
+        // Keep a tally of the # of futile wakeups.
+        // Note that the counter is not protected by a lock or updated by atomics.
+        // That is by design - we trade "lossy" counters which are exposed to
+        // races during updates for a lower probe effect.
+        TEVENT (Inflated enter - Futile wakeup) ;
+        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+           ObjectMonitor::_sync_FutileWakeups->inc() ;
+        }
+        ++ nWakeups ;
+
+        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
+        // We can defer clearing _succ until after the spin completes
+        // TrySpin() must tolerate being called with _succ == Self.
+        // Try yet another round of adaptive spinning.
+        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
+
+        // We can find that we were unpark()ed and redesignated _succ while
+        // we were spinning.  That's harmless.  If we iterate and call park(),
+        // park() will consume the event and return immediately and we'll
+        // just spin again.  This pattern can repeat, leaving _succ to simply
+        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
+        // Alternately, we can sample fired() here, and if set, forgo spinning
+        // in the next iteration.
+
+        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
+           Self->_ParkEvent->reset() ;
+           OrderAccess::fence() ;
+        }
+        if (_succ == Self) _succ = NULL ;
+
+        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
+        OrderAccess::fence() ;
+    }
+
+    // Egress :
+    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
+    // Normally we'll find Self on the EntryList .
+    // From the perspective of the lock owner (this thread), the
+    // EntryList is stable and cxq is prepend-only.
+    // The head of cxq is volatile but the interior is stable.
+    // In addition, Self.TState is stable.
+
+    assert (_owner == Self      , "invariant") ;
+    assert (object() != NULL    , "invariant") ;
+    // I'd like to write:
+    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+    // but as we're at a safepoint that's not safe.
+
+    UnlinkAfterAcquire (Self, &node) ;
+    if (_succ == Self) _succ = NULL ;
+
+    assert (_succ != Self, "invariant") ;
+    if (_Responsible == Self) {
+        _Responsible = NULL ;
+        // Dekker pivot-point.
+        // Consider OrderAccess::storeload() here
+
+        // We may leave threads on cxq|EntryList without a designated
+        // "Responsible" thread.  This is benign.  When this thread subsequently
+        // exits the monitor it can "see" such preexisting "old" threads --
+        // threads that arrived on the cxq|EntryList before the fence, above --
+        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
+        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
+        // non-null and elect a new "Responsible" timer thread.
+        //
+        // This thread executes:
+        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
+        //    LD cxq|EntryList               (in subsequent exit)
+        //
+        // Entering threads in the slow/contended path execute:
+        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
+        //    The (ST cxq; MEMBAR) is accomplished with CAS().
+        //
+        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
+        // exit operation from floating above the ST Responsible=null.
+        //
+        // In *practice* however, EnterI() is always followed by some atomic
+        // operation such as the decrement of _count in ::enter().  Those atomics
+        // obviate the need for the explicit MEMBAR, above.
+    }
+
+    // We've acquired ownership with CAS().
+    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
+    // But since the CAS() this thread may have also stored into _succ,
+    // EntryList, cxq or Responsible.  These meta-data updates must be
+    // visible __before this thread subsequently drops the lock.
+    // Consider what could occur if we didn't enforce this constraint --
+    // STs to monitor meta-data and user-data could reorder with (become
+    // visible after) the ST in exit that drops ownership of the lock.
+    // Some other thread could then acquire the lock, but observe inconsistent
+    // or old monitor meta-data and heap data.  That violates the JMM.
+    // To that end, the 1-0 exit() operation must have at least STST|LDST
+    // "release" barrier semantics.  Specifically, there must be at least a
+    // STST|LDST barrier in exit() before the ST of null into _owner that drops
+    // the lock.   The barrier ensures that changes to monitor meta-data and data
+    // protected by the lock will be visible before we release the lock, and
+    // therefore before some other thread (CPU) has a chance to acquire the lock.
+    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+    //
+    // Critically, any prior STs to _succ or EntryList must be visible before
+    // the ST of null into _owner in the *subsequent* (following) corresponding
+    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
+    // execute a serializing instruction.
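+    //
+    // Schematically, the 1-0 exit path must therefore emit at least:
+    //    ST   monitor meta-data and data protected by the lock
+    //    MEMBAR #storestore|#loadstore          -- "release" semantics
+    //    ST   _owner = NULL                     -- drop the lock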
+
+    if (SyncFlags & 8) {
+       OrderAccess::fence() ;
+    }
+    return ;
+}
+
+// ReenterI() is a specialized inline form of the latter half of the
+// contended slow-path from EnterI().  We use ReenterI() only for
+// monitor reentry in wait().
+//
+// In the future we should reconcile EnterI() and ReenterI(), adding
+// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
+// loop accordingly.
+
+void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
+    assert (Self != NULL                , "invariant") ;
+    assert (SelfNode != NULL            , "invariant") ;
+    assert (SelfNode->_thread == Self   , "invariant") ;
+    assert (_waiters > 0                , "invariant") ;
+    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
+    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
+    JavaThread * jt = (JavaThread *) Self ;
+
+    int nWakeups = 0 ;
+    for (;;) {
+        ObjectWaiter::TStates v = SelfNode->TState ;
+        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
+        assert    (_owner != Self, "invariant") ;
+
+        if (TryLock (Self) > 0) break ;
+        if (TrySpin (Self) > 0) break ;
+
+        TEVENT (Wait Reentry - parking) ;
+
+        // State transition wrappers around park() ...
+        // ReenterI() wisely defers state transitions until
+        // it's clear we must park the thread.
+        {
+           OSThreadContendState osts(Self->osthread());
+           ThreadBlockInVM tbivm(jt);
+
+           // cleared by handle_special_suspend_equivalent_condition()
+           // or java_suspend_self()
+           jt->set_suspend_equivalent();
+           if (SyncFlags & 1) {
+              Self->_ParkEvent->park ((jlong)1000) ;
+           } else {
+              Self->_ParkEvent->park () ;
+           }
+
+           // were we externally suspended while we were waiting?
+           for (;;) {
+              if (!ExitSuspendEquivalent (jt)) break ;
+              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
+              jt->java_suspend_self();
+              jt->set_suspend_equivalent();
+           }
+        }
+
+        // Try again, but just so we distinguish between futile wakeups and
+        // successful wakeups.  The following test isn't algorithmically
+        // necessary, but it helps us maintain sensible statistics.
+        if (TryLock(Self) > 0) break ;
+
+        // The lock is still contested.
+        // Keep a tally of the # of futile wakeups.
+        // Note that the counter is not protected by a lock or updated by atomics.
+        // That is by design - we trade "lossy" counters which are exposed to
+        // races during updates for a lower probe effect.
+        TEVENT (Wait Reentry - futile wakeup) ;
+        ++ nWakeups ;
+
+        // Assuming this is not a spurious wakeup we'll normally
+        // find that _succ == Self.
+        if (_succ == Self) _succ = NULL ;
+
+        // Invariant: after clearing _succ a contending thread
+        // *must* retry  _owner before parking.
+        OrderAccess::fence() ;
+
+        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
+          ObjectMonitor::_sync_FutileWakeups->inc() ;
+        }
+    }
+
+    // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
+    // Normally we'll find Self on the EntryList.
+    // Unlinking from the EntryList is constant-time and atomic-free.
+    // From the perspective of the lock owner (this thread), the
+    // EntryList is stable and cxq is prepend-only.
+    // The head of cxq is volatile but the interior is stable.
+    // In addition, Self.TState is stable.
+
+    assert (_owner == Self, "invariant") ;
+    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+    UnlinkAfterAcquire (Self, SelfNode) ;
+    if (_succ == Self) _succ = NULL ;
+    assert (_succ != Self, "invariant") ;
+    SelfNode->TState = ObjectWaiter::TS_RUN ;
+    OrderAccess::fence() ;      // see comments at the end of EnterI()
+}
+
+// By convention we unlink a contending thread from the EntryList|cxq immediately
+// after the thread acquires the lock in ::enter().  Equally, we could defer
+// unlinking the thread until ::exit()-time.
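+//
+// Two cases are handled below:
+//   TS_ENTER -- Self is on the doubly-linked EntryList; unlinking is constant-time
+//               and atomic-free, as only the lock owner mutates the EntryList.
+//   TS_CXQ   -- Self is still on the singly-linked cxq; we dequeue from the head
+//               with a CAS, or from the interior with a linear scan.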
+
+void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
+{
+    assert (_owner == Self, "invariant") ;
+    assert (SelfNode->_thread == Self, "invariant") ;
+
+    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
+        // Normal case: remove Self from the DLL EntryList .
+        // This is a constant-time operation.
+        ObjectWaiter * nxt = SelfNode->_next ;
+        ObjectWaiter * prv = SelfNode->_prev ;
+        if (nxt != NULL) nxt->_prev = prv ;
+        if (prv != NULL) prv->_next = nxt ;
+        if (SelfNode == _EntryList ) _EntryList = nxt ;
+        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+        TEVENT (Unlink from EntryList) ;
+    } else {
+        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+        // Inopportune interleaving -- Self is still on the cxq.
+        // This usually means the enqueue of self raced an exiting thread.
+        // Normally we'll find Self near the front of the cxq, so
+        // dequeueing is typically fast.  If need be we can accelerate
+        // this with some MCS/CLH-like bidirectional list hints and advisory
+        // back-links so dequeueing from the interior will normally operate
+        // in constant-time.
+        // Dequeue Self from either the head (with CAS) or from the interior
+        // with a linear-time scan and normal non-atomic memory operations.
+        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
+        // and then unlink Self from EntryList.  We have to drain eventually,
+        // so it might as well be now.
+
+        ObjectWaiter * v = _cxq ;
+        assert (v != NULL, "invariant") ;
+        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
+            // The CAS above can fail from interference IFF a "RAT" arrived.
+            // In that case Self must be in the interior and can no longer be
+            // at the head of cxq.
+            if (v == SelfNode) {
+                assert (_cxq != v, "invariant") ;
+                v = _cxq ;          // CAS above failed - start scan at head of list
+            }
+            ObjectWaiter * p ;
+            ObjectWaiter * q = NULL ;
+            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
+                q = p ;
+                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+            }
+            assert (v != SelfNode,  "invariant") ;
+            assert (p == SelfNode,  "Node not found on cxq") ;
+            assert (p != _cxq,      "invariant") ;
+            assert (q != NULL,      "invariant") ;
+            assert (q->_next == p,  "invariant") ;
+            q->_next = p->_next ;
+        }
+        TEVENT (Unlink from cxq) ;
+    }
+
+    // Diagnostic hygiene ...
+    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
+    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
+    SelfNode->TState = ObjectWaiter::TS_RUN ;
+}
+
+// -----------------------------------------------------------------------------
+// Exit support
+//
+// exit()
+// ~~~~~~
+// Note that the collector can't reclaim the objectMonitor or deflate
+// the object out from underneath the thread calling ::exit() as the
+// thread calling ::exit() never transitions to a stable state.
+// This inhibits GC, which in turn inhibits asynchronous (and
+// inopportune) reclamation of "this".
+//
+// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
+// There's one exception to the claim above, however.  EnterI() can call
+// exit() to drop a lock if the acquirer has been externally suspended.
+// In that case exit() is called with _thread_state as _thread_blocked,
+// but the monitor's _count field is > 0, which inhibits reclamation.
+//
+// 1-0 exit
+// ~~~~~~~~
+// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
+// the fast-path operators have been optimized so the common ::exit()
+// operation is 1-0.  See i486.ad fast_unlock(), for instance.
+// The code emitted by fast_unlock() elides the usual MEMBAR.  This
+// greatly improves latency -- MEMBAR and CAS having considerable local
+// latency on modern processors -- but at the cost of "stranding".  Absent the
+// MEMBAR, a thread in fast_unlock() can race a thread in the slow
+// ::enter() path, resulting in the entering thread being stranded
+// and a progress-liveness failure.   Stranding is extremely rare.
+// We use timers (timed park operations) & periodic polling to detect
+// and recover from stranding.  Potentially stranded threads periodically
+// wake up and poll the lock.  See the usage of the _Responsible variable.
+//
+// The CAS() in enter provides for safety and exclusion, while the CAS or
+// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
+// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
+// We detect and recover from stranding with timers.
+//
+// If a thread transiently strands it'll park until (a) another
+// thread acquires the lock and then drops the lock, at which time the
+// exiting thread will notice and unpark the stranded thread, or, (b)
+// the timer expires.  If the lock is high traffic then the stranding latency
+// will be low due to (a).  If the lock is low traffic then the odds of
+// stranding are lower, although the worst-case stranding latency
+// is longer.  Critically, we don't want to put excessive load in the
+// platform's timer subsystem.  We want to minimize both the timer injection
+// rate (timers created/sec) as well as the number of timers active at
+// any one time.  (more precisely, we want to minimize timer-seconds, which is
+// the integral of the # of active timers at any instant over time).
+// Both impinge on OS scalability.  Given that, at most one thread parked on
+// a monitor will use a timer.
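+//
+// Schematically:
+//    1-1 exit:  ST _owner = NULL ; MEMBAR ; LD cxq|EntryList ; wake a successor if needed
+//    1-0 exit:  ST _owner = NULL             -- no MEMBAR; stranding is possible but rare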
+
+void ATTR ObjectMonitor::exit(TRAPS) {
+   Thread * Self = THREAD ;
+   if (THREAD != _owner) {
+     if (THREAD->is_lock_owned((address) _owner)) {
+       // Transmute _owner from a BasicLock pointer to a Thread address.
+       // We don't need to hold _mutex for this transition.
+       // Non-null to Non-null is safe as long as all readers can
+       // tolerate either flavor.
+       assert (_recursions == 0, "invariant") ;
+       _owner = THREAD ;
+       _recursions = 0 ;
+       OwnerIsThread = 1 ;
+     } else {
+       // NOTE: we need to handle unbalanced monitor enter/exit
+       // in native code by throwing an exception.
+       // TODO: Throw an IllegalMonitorStateException ?
+       TEVENT (Exit - Throw IMSX) ;
+       assert(false, "Non-balanced monitor enter/exit!");
+       if (false) {
+          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
+       }
+       return;
+     }
+   }
+
+   if (_recursions != 0) {
+     _recursions--;        // this is simple recursive enter
+     TEVENT (Inflated exit - recursive) ;
+     return ;
+   }
+
+   // Invariant: after setting Responsible=null a thread must execute
+   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
+   if ((SyncFlags & 4) == 0) {
+      _Responsible = NULL ;
+   }
+
+   for (;;) {
+      assert (THREAD == _owner, "invariant") ;
+
+
+      if (Knob_ExitPolicy == 0) {
+         // release semantics: prior loads and stores from within the critical section
+         // must not float (reorder) past the following store that drops the lock.
+         // On SPARC that requires MEMBAR #loadstore|#storestore.
+         // But of course in TSO #loadstore|#storestore is not required.
+         // I'd like to write one of the following:
+         // A.  OrderAccess::release() ; _owner = NULL
+         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
+         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
+         // store into a _dummy variable.  That store is not needed, but can result
+         // in massive wasteful coherency traffic on classic SMP systems.
+         // Instead, I use release_store(), which is implemented as just a simple
+         // ST on x64, x86 and SPARC.
+         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
+         OrderAccess::storeload() ;                         // See if we need to wake a successor
+         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+            TEVENT (Inflated exit - simple egress) ;
+            return ;
+         }
+         TEVENT (Inflated exit - complex egress) ;
+
+         // Normally the exiting thread is responsible for ensuring succession,
+         // but if other successors are ready or other entering threads are spinning
+         // then this thread can simply store NULL into _owner and exit without
+         // waking a successor.  The existence of spinners or ready successors
+         // guarantees proper succession (liveness).  Responsibility passes to the
+         // ready or running successors.  The exiting thread delegates the duty.
+         // More precisely, if a successor already exists this thread is absolved
+         // of the responsibility of waking (unparking) one.
+         //
+         // The _succ variable is critical to reducing futile wakeup frequency.
+         // _succ identifies the "heir presumptive" thread that has been made
+         // ready (unparked) but that has not yet run.  We need only one such
+         // successor thread to guarantee progress.
+         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
+         // section 3.3 "Futile Wakeup Throttling" for details.
+         //
+         // Note that spinners in Enter() also set _succ non-null.
+         // In the current implementation spinners opportunistically set
+         // _succ so that exiting threads might avoid waking a successor.
+         // Another less appealing alternative would be for the exiting thread
+         // to drop the lock and then spin briefly to see if a spinner managed
+         // to acquire the lock.  If so, the exiting thread could exit
+         // immediately without waking a successor, otherwise the exiting
+         // thread would need to dequeue and wake a successor.
+         // (Note that we'd need to make the post-drop spin short, but no
+         // shorter than the worst-case round-trip cache-line migration time.
+         // The dropped lock needs to become visible to the spinner, and then
+         // the acquisition of the lock by the spinner must become visible to
+         // the exiting thread).
+         //
+
+         // It appears that an heir-presumptive (successor) must be made ready.
+         // Only the current lock owner can manipulate the EntryList or
+         // drain _cxq, so we need to reacquire the lock.  If we fail
+         // to reacquire the lock the responsibility for ensuring succession
+         // falls to the new owner.
+         //
+         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+            return ;
+         }
+         TEVENT (Exit - Reacquired) ;
+      } else {
+         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
+            OrderAccess::storeload() ;
+            // Ratify the previously observed values.
+            if (_cxq == NULL || _succ != NULL) {
+                TEVENT (Inflated exit - simple egress) ;
+                return ;
+            }
+
+            // inopportune interleaving -- the exiting thread (this thread)
+            // in the fast-exit path raced an entering thread in the slow-enter
+            // path.
+            // We have two choices:
+            // A.  Try to reacquire the lock.
+            //     If the CAS() fails return immediately, otherwise
+            //     we either restart/rerun the exit operation, or simply
+            //     fall-through into the code below which wakes a successor.
+            // B.  If the elements forming the EntryList|cxq are TSM
+            //     we could simply unpark() the lead thread and return
+            //     without having set _succ.
+            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+               TEVENT (Inflated exit - reacquired succeeded) ;
+               return ;
+            }
+            TEVENT (Inflated exit - reacquired failed) ;
+         } else {
+            TEVENT (Inflated exit - complex egress) ;
+         }
+      }
+
+      guarantee (_owner == THREAD, "invariant") ;
+
+      ObjectWaiter * w = NULL ;
+      int QMode = Knob_QMode ;
+
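+      // Knob_QMode selects the succession policy implemented by the cases below:
+      //   2 -- wake a successor directly from the cxq, bypassing the EntryList
+      //   3 -- drain the cxq and append it to the tail of the EntryList
+      //   4 -- drain the cxq and prepend it to the head of the EntryList
+      //   1 -- when draining the cxq into an empty EntryList, reverse the order
+      //   0 -- (default) drain the cxq into an empty EntryList, preserving order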
+      if (QMode == 2 && _cxq != NULL) {
+          // QMode == 2 : cxq has precedence over EntryList.
+          // Try to directly wake a successor from the cxq.
+          // If successful, the successor will need to unlink itself from cxq.
+          w = _cxq ;
+          assert (w != NULL, "invariant") ;
+          assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+          ExitEpilog (Self, w) ;
+          return ;
+      }
+
+      if (QMode == 3 && _cxq != NULL) {
+          // Aggressively drain cxq into EntryList at the first opportunity.
+          // This policy ensures that recently-run threads live at the head of EntryList.
+          // Drain _cxq into EntryList - bulk transfer.
+          // First, detach _cxq.
+          // The following loop is tantamount to: w = swap (&cxq, NULL)
+          w = _cxq ;
+          for (;;) {
+             assert (w != NULL, "Invariant") ;
+             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
+             if (u == w) break ;
+             w = u ;
+          }
+          assert (w != NULL              , "invariant") ;
+
+          ObjectWaiter * q = NULL ;
+          ObjectWaiter * p ;
+          for (p = w ; p != NULL ; p = p->_next) {
+              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+              p->TState = ObjectWaiter::TS_ENTER ;
+              p->_prev = q ;
+              q = p ;
+          }
+
+          // Append the RATs to the EntryList
+          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
+          ObjectWaiter * Tail ;
+          for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
+          if (Tail == NULL) {
+              _EntryList = w ;
+          } else {
+              Tail->_next = w ;
+              w->_prev = Tail ;
+          }
+
+          // Fall thru into code that tries to wake a successor from EntryList
+      }
+
+      if (QMode == 4 && _cxq != NULL) {
+          // Aggressively drain cxq into EntryList at the first opportunity.
+          // This policy ensures that recently-run threads live at the head of EntryList.
+
+          // Drain _cxq into EntryList - bulk transfer.
+          // First, detach _cxq.
+          // The following loop is tantamount to: w = swap (&cxq, NULL)
+          w = _cxq ;
+          for (;;) {
+             assert (w != NULL, "Invariant") ;
+             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
+             if (u == w) break ;
+             w = u ;
+          }
+          assert (w != NULL              , "invariant") ;
+
+          ObjectWaiter * q = NULL ;
+          ObjectWaiter * p ;
+          for (p = w ; p != NULL ; p = p->_next) {
+              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+              p->TState = ObjectWaiter::TS_ENTER ;
+              p->_prev = q ;
+              q = p ;
+          }
+
+          // Prepend the RATs to the EntryList
+          if (_EntryList != NULL) {
+              q->_next = _EntryList ;
+              _EntryList->_prev = q ;
+          }
+          _EntryList = w ;
+
+          // Fall thru into code that tries to wake a successor from EntryList
+      }
+
+      w = _EntryList  ;
+      if (w != NULL) {
+          // I'd like to write: guarantee (w->_thread != Self).
+          // But in practice an exiting thread may find itself on the EntryList.
+          // Let's say thread T1 calls O.wait().  wait() enqueues T1 on O's waitset and
+          // then calls exit().  exit() releases the lock by setting O._owner to NULL.
+          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
+          // notify() operation moves T1 from O's waitset to O's EntryList.  T2 then
+          // releases the lock "O".  T1 resumes immediately after the ST of null into
+          // _owner, above.  T1 notices that the EntryList is populated, so it
+          // reacquires the lock and then finds itself on the EntryList.
+          // Given all that, we have to tolerate the circumstance where "w" is
+          // associated with Self.
+          assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+          ExitEpilog (Self, w) ;
+          return ;
+      }
+
+      // If we find that both _cxq and EntryList are null then just
+      // re-run the exit protocol from the top.
+      w = _cxq ;
+      if (w == NULL) continue ;
+
+      // Drain _cxq into EntryList - bulk transfer.
+      // First, detach _cxq.
+      // The following loop is tantamount to: w = swap (&cxq, NULL)
+      for (;;) {
+          assert (w != NULL, "Invariant") ;
+          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
+          if (u == w) break ;
+          w = u ;
+      }
+      TEVENT (Inflated exit - drain cxq into EntryList) ;
+
+      assert (w != NULL              , "invariant") ;
+      assert (_EntryList  == NULL    , "invariant") ;
+
+      // Convert the LIFO SLL anchored by _cxq into a DLL.
+      // The list reorganization step operates in O(LENGTH(w)) time.
+      // It's critical that this step operate quickly as
+      // "Self" still holds the outer-lock, restricting parallelism
+      // and effectively lengthening the critical section.
+      // Invariant: s chases t chases u.
+      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
+      // we have faster access to the tail.
+
+      if (QMode == 1) {
+         // QMode == 1 : drain cxq to EntryList, reversing the order of the list.
+         ObjectWaiter * s = NULL ;
+         ObjectWaiter * t = w ;
+         ObjectWaiter * u = NULL ;
+         while (t != NULL) {
+             guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+             t->TState = ObjectWaiter::TS_ENTER ;
+             u = t->_next ;
+             t->_prev = u ;
+             t->_next = s ;
+             s = t;
+             t = u ;
+         }
+         _EntryList  = s ;
+         assert (s != NULL, "invariant") ;
+      } else {
+         // QMode == 0 or QMode == 2
+         _EntryList = w ;
+         ObjectWaiter * q = NULL ;
+         ObjectWaiter * p ;
+         for (p = w ; p != NULL ; p = p->_next) {
+             guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+             p->TState = ObjectWaiter::TS_ENTER ;
+             p->_prev = q ;
+             q = p ;
+         }
+      }
+
+      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
+      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
+
+      // See if we can abdicate to a spinner instead of waking a thread.
+      // A primary goal of the implementation is to reduce the
+      // context-switch rate.
+      if (_succ != NULL) continue;
+
+      w = _EntryList  ;
+      if (w != NULL) {
+          guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+          ExitEpilog (Self, w) ;
+          return ;
+      }
+   }
+}
+
+// ExitSuspendEquivalent:
+// A faster alternative to handle_special_suspend_equivalent_condition()
+//
+// handle_special_suspend_equivalent_condition() unconditionally
+// acquires the SR_lock.  On some platforms uncontended MutexLocker()
+// operations have high latency.  Note that in ::enter() we call HSSEC
+// while holding the monitor, so we effectively lengthen the critical sections.
+//
+// There are a number of possible solutions:
+//
+// A.  To ameliorate the problem we might also defer state transitions
+//     to as late as possible -- just prior to parking.
+//     Given that, we'd call HSSEC after having returned from park(),
+//     but before attempting to acquire the monitor.  This is only a
+//     partial solution.  It avoids calling HSSEC while holding the
+//     monitor (good), but it still increases successor reacquisition latency --
+//     the interval between unparking a successor and the time the successor
+//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
+//     If we use this technique we can also avoid EnterI()-exit() loop
+//     in ::enter() where we iteratively drop the lock and then attempt
+//     to reacquire it after suspending.
+//
+// B.  In the future we might fold all the suspend bits into a
+//     composite per-thread suspend flag and then update it with CAS().
+//     Alternately, a Dekker-like mechanism with multiple variables
+//     would suffice:
+//       ST Self->_suspend_equivalent = false
+//       MEMBAR
+//       LD Self->_suspend_flags
+//
+
+
+bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
+   int Mode = Knob_FastHSSEC ;
+   if (Mode && !jSelf->is_external_suspend()) {
+      assert (jSelf->is_suspend_equivalent(), "invariant") ;
+      jSelf->clear_suspend_equivalent() ;
+      if (2 == Mode) OrderAccess::storeload() ;
+      if (!jSelf->is_external_suspend()) return false ;
+      // We raced a suspension -- fall thru into the slow path
+      TEVENT (ExitSuspendEquivalent - raced) ;
+      jSelf->set_suspend_equivalent() ;
+   }
+   return jSelf->handle_special_suspend_equivalent_condition() ;
+}
+
+
+void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
+   assert (_owner == Self, "invariant") ;
+
+   // Exit protocol:
+   // 1. ST _succ = wakee
+   // 2. membar #loadstore|#storestore;
+   // 3. ST _owner = NULL
+   // 4. unpark(wakee)
+
+   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
+   ParkEvent * Trigger = Wakee->_event ;
+
+   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
+   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
+   // out-of-scope (non-extant).
+   Wakee  = NULL ;
+
+   // Drop the lock
+   OrderAccess::release_store_ptr (&_owner, NULL) ;
+   OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
+
+   if (SafepointSynchronize::do_call_back()) {
+      TEVENT (unpark before SAFEPOINT) ;
+   }
+
+   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
+   Trigger->unpark() ;
+
+   // Maintain stats and report events to JVMTI
+   if (ObjectMonitor::_sync_Parks != NULL) {
+      ObjectMonitor::_sync_Parks->inc() ;
+   }
+}
+
+
+// -----------------------------------------------------------------------------
+// Class Loader deadlock handling.
+//
+// complete_exit exits a lock returning recursion count
+// complete_exit/reenter operate as a wait without waiting
+// complete_exit requires an inflated monitor
+// The _owner field is not always the Thread addr even with an
+// inflated monitor, e.g. the monitor can be inflated by a non-owning
+// thread due to contention.
+intptr_t ObjectMonitor::complete_exit(TRAPS) {
+   Thread * const Self = THREAD;
+   assert(Self->is_Java_thread(), "Must be Java thread!");
+   JavaThread *jt = (JavaThread *)THREAD;
+
+   DeferredInitialize();
+
+   if (THREAD != _owner) {
+    if (THREAD->is_lock_owned ((address)_owner)) {
+       assert(_recursions == 0, "internal state error");
+       _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
+       _recursions = 0 ;
+       OwnerIsThread = 1 ;
+    }
+   }
+
+   guarantee(Self == _owner, "complete_exit not owner");
+   intptr_t save = _recursions; // record the old recursion count
+   _recursions = 0;        // set the recursion level to be 0
+   exit (Self) ;           // exit the monitor
+   guarantee (_owner != Self, "invariant");
+   return save;
+}
+
+// reenter() enters a lock and sets recursion count
+// complete_exit/reenter operate as a wait without waiting
+void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
+   Thread * const Self = THREAD;
+   assert(Self->is_Java_thread(), "Must be Java thread!");
+   JavaThread *jt = (JavaThread *)THREAD;
+
+   guarantee(_owner != Self, "reenter already owner");
+   enter (THREAD);       // enter the monitor
+   guarantee (_recursions == 0, "reenter recursion");
+   _recursions = recursions;
+   return;
+}
+
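+
+// Intended pairing (illustrative sketch only -- the actual call sites live
+// elsewhere in the runtime):
+//    intptr_t rec = monitor->complete_exit(THREAD) ;  // fully release, remember depth
+//    ... run without holding the monitor ...
+//    monitor->reenter(rec, THREAD) ;                  // reacquire, restore depth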
+
+// -----------------------------------------------------------------------------
+// A macro is used below because there may already be a pending
+// exception which should not abort the execution of the routines
+// which use this (which is why we don't put this into check_slow and
+// call it with a CHECK argument).
+
+#define CHECK_OWNER()                                                             \
+  do {                                                                            \
+    if (THREAD != _owner) {                                                       \
+      if (THREAD->is_lock_owned((address) _owner)) {                              \
+        _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
+        _recursions = 0;                                                          \
+        OwnerIsThread = 1 ;                                                       \
+      } else {                                                                    \
+        TEVENT (Throw IMSX) ;                                                     \
+        THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
+      }                                                                           \
+    }                                                                             \
+  } while (false)
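+
+// CHECK_OWNER() is used at the top of wait(), notify() and notifyAll() below.
+// If the calling thread holds only the stack-lock it silently converts _owner
+// from the BasicLock address to the Thread address; otherwise it throws
+// IllegalMonitorStateException and returns from the enclosing function.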
+
+// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
+// TODO-FIXME: remove check_slow() -- it's likely dead.
+
+void ObjectMonitor::check_slow(TRAPS) {
+  TEVENT (check_slow - throw IMSX) ;
+  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
+  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
+}
+
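+// Adjust() is a lock-free fetch-and-add built from a CAS retry loop: it
+// atomically adds dx to *adr and returns the value observed just before the
+// successful update.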
+static int Adjust (volatile int * adr, int dx) {
+  int v ;
+  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
+  return v ;
+}
+// -----------------------------------------------------------------------------
+// Wait/Notify/NotifyAll
+//
+// Note: a subset of changes to ObjectMonitor::wait()
+// will need to be replicated in complete_exit above
+void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
+   Thread * const Self = THREAD ;
+   assert(Self->is_Java_thread(), "Must be Java thread!");
+   JavaThread *jt = (JavaThread *)THREAD;
+
+   DeferredInitialize () ;
+
+   // Throw IMSX or IEX.
+   CHECK_OWNER();
+
+   // check for a pending interrupt
+   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+     // post monitor waited event.  Note that this is past-tense, we are done waiting.
+     if (JvmtiExport::should_post_monitor_waited()) {
+        // Note: 'false' parameter is passed here because the
+        // wait was not timed out due to thread interrupt.
+        JvmtiExport::post_monitor_waited(jt, this, false);
+     }
+     TEVENT (Wait - Throw IEX) ;
+     THROW(vmSymbols::java_lang_InterruptedException());
+     return ;
+   }
+   TEVENT (Wait) ;
+
+   assert (Self->_Stalled == 0, "invariant") ;
+   Self->_Stalled = intptr_t(this) ;
+   jt->set_current_waiting_monitor(this);
+
+   // create a node to be put into the queue
+   // Critically, after we reset() the event but prior to park(), we must check
+   // for a pending interrupt.
+   ObjectWaiter node(Self);
+   node.TState = ObjectWaiter::TS_WAIT ;
+   Self->_ParkEvent->reset() ;
+   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
+
+   // Enter the waiting queue, which is a circular doubly linked list in this case
+   // but it could be a priority queue or any data structure.
+   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
+   // by the owner of the monitor *except* in the case where park()
+   // returns because of a timeout or interrupt.  Contention is exceptionally rare
+   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
+
+   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
+   AddWaiter (&node) ;
+   Thread::SpinRelease (&_WaitSetLock) ;
+
+   if ((SyncFlags & 4) == 0) {
+      _Responsible = NULL ;
+   }
+   intptr_t save = _recursions; // record the old recursion count
+   _waiters++;                  // increment the number of waiters
+   _recursions = 0;             // set the recursion level to be 0
+   exit (Self) ;                    // exit the monitor
+   guarantee (_owner != Self, "invariant") ;
+
+   // As soon as the ObjectMonitor's ownership is dropped in the exit()
+   // call above, another thread can enter() the ObjectMonitor, do the
+   // notify(), and exit() the ObjectMonitor. If the other thread's
+   // exit() call chooses this thread as the successor and the unpark()
+   // call happens to occur while this thread is posting a
+   // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
+   // handler using RawMonitors and consuming the unpark().
+   //
+   // To avoid the problem, we re-post the event. This does no harm
+   // even if the original unpark() was not consumed because we are the
+   // chosen successor for this monitor.
+   if (node._notified != 0 && _succ == Self) {
+      node._event->unpark();
+   }
+
+   // The thread is on the WaitSet list - now park() it.
+   // On MP systems it's conceivable that a brief spin before we park
+   // could be profitable.
+   //
+   // TODO-FIXME: change the following logic to a loop of the form
+   //   while (!timeout && !interrupted && _notified == 0) park()
+
+   int ret = OS_OK ;
+   int WasNotified = 0 ;
+   { // State transition wrappers
+     OSThread* osthread = Self->osthread();
+     OSThreadWaitState osts(osthread, true);
+     {
+       ThreadBlockInVM tbivm(jt);
+       // Thread is in thread_blocked state and oop access is unsafe.
+       jt->set_suspend_equivalent();
+
+       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
+           // Intentionally empty
+       } else
+       if (node._notified == 0) {
+         if (millis <= 0) {
+            Self->_ParkEvent->park () ;
+         } else {
+            ret = Self->_ParkEvent->park (millis) ;
+         }
+       }
+
+       // were we externally suspended while we were waiting?
+       if (ExitSuspendEquivalent (jt)) {
+          // TODO-FIXME: add -- if succ == Self then succ = null.
+          jt->java_suspend_self();
+       }
+
+     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
+
+
+     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
+     // from the WaitSet to the EntryList.
+     // See if we need to remove Node from the WaitSet.
+     // We use double-checked locking to avoid grabbing _WaitSetLock
+     // if the thread is not on the wait queue.
+     //
+     // Note that we don't need a fence before the fetch of TState.
+     // In the worst case we'll fetch an old, stale value of TS_WAIT previously
+     // written by this thread. (perhaps the fetch might even be satisfied
+     // by a look-aside into the processor's own store buffer, although given
+     // the length of the code path between the prior ST and this load that's
+     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
+     // then we'll acquire the lock and then re-fetch a fresh TState value.
+     // That is, we fail toward safety.
+
+     if (node.TState == ObjectWaiter::TS_WAIT) {
+         Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
+         if (node.TState == ObjectWaiter::TS_WAIT) {
+            DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
+            assert(node._notified == 0, "invariant");
+            node.TState = ObjectWaiter::TS_RUN ;
+         }
+         Thread::SpinRelease (&_WaitSetLock) ;
+     }
+
+     // The thread is now either off-list (TS_RUN),
+     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
+     // The Node's TState variable is stable from the perspective of this thread.
+     // No other threads will asynchronously modify TState.
+     guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
+     OrderAccess::loadload() ;
+     if (_succ == Self) _succ = NULL ;
+     WasNotified = node._notified ;
+
+     // Reentry phase -- reacquire the monitor.
+     // re-enter contended monitor after object.wait().
+     // retain OBJECT_WAIT state until re-enter successfully completes
+     // Thread state is thread_in_vm and oop access is again safe,
+     // although the raw address of the object may have changed.
+     // (Don't cache naked oops over safepoints, of course).
+
+     // post monitor waited event. Note that this is past-tense, we are done waiting.
+     if (JvmtiExport::should_post_monitor_waited()) {
+       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
+     }
+     OrderAccess::fence() ;
+
+     assert (Self->_Stalled != 0, "invariant") ;
+     Self->_Stalled = 0 ;
+
+     assert (_owner != Self, "invariant") ;
+     ObjectWaiter::TStates v = node.TState ;
+     if (v == ObjectWaiter::TS_RUN) {
+         enter (Self) ;
+     } else {
+         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
+         ReenterI (Self, &node) ;
+         node.wait_reenter_end(this);
+     }
+
+     // Self has reacquired the lock.
+     // Lifecycle - the node representing Self must not appear on any queues.
+     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
+     // want residual elements associated with this thread left on any lists.
+     guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
+     assert    (_owner == Self, "invariant") ;
+     assert    (_succ != Self , "invariant") ;
+   } // OSThreadWaitState()
+
+   jt->set_current_waiting_monitor(NULL);
+
+   guarantee (_recursions == 0, "invariant") ;
+   _recursions = save;     // restore the old recursion count
+   _waiters--;             // decrement the number of waiters
+
+   // Verify a few postconditions
+   assert (_owner == Self       , "invariant") ;
+   assert (_succ  != Self       , "invariant") ;
+   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+
+   if (SyncFlags & 32) {
+      OrderAccess::fence() ;
+   }
+
+   // check if the notification happened
+   if (!WasNotified) {
+     // no, it could be timeout or Thread.interrupt() or both
+     // check for interrupt event, otherwise it is timeout
+     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+       TEVENT (Wait - throw IEX from epilog) ;
+       THROW(vmSymbols::java_lang_InterruptedException());
+     }
+   }
+
+   // NOTE: a spurious wakeup will be considered a timeout.
+   // Monitor notify has precedence over thread interrupt.
+}
+
+
+// Consider:
+// If the lock is cool (cxq == null && succ == null) and we're on an MP system
+// then instead of transferring a thread from the WaitSet to the EntryList
+// we might just dequeue a thread from the WaitSet and directly unpark() it.
+
+void ObjectMonitor::notify(TRAPS) {
+  CHECK_OWNER();
+  if (_WaitSet == NULL) {
+     TEVENT (Empty-Notify) ;
+     return ;
+  }
+  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
+
+  int Policy = Knob_MoveNotifyee ;
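+  // Knob_MoveNotifyee (Policy) selects where the notified thread is placed:
+  //   0  -- prepend to the EntryList
+  //   1  -- append to the EntryList
+  //   2  -- prepend to the cxq (or to the EntryList if the EntryList is empty)
+  //   3  -- append to the cxq
+  //   4+ -- mark the notifyee TS_RUN and unpark() it immediately, without enqueueing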
+
+  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
+  ObjectWaiter * iterator = DequeueWaiter() ;
+  if (iterator != NULL) {
+     TEVENT (Notify1 - Transfer) ;
+     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
+     guarantee (iterator->_notified == 0, "invariant") ;
+     if (Policy != 4) {
+        iterator->TState = ObjectWaiter::TS_ENTER ;
+     }
+     iterator->_notified = 1 ;
+
+     ObjectWaiter * List = _EntryList ;
+     if (List != NULL) {
+        assert (List->_prev == NULL, "invariant") ;
+        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+        assert (List != iterator, "invariant") ;
+     }
+
+     if (Policy == 0) {       // prepend to EntryList
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+             List->_prev = iterator ;
+             iterator->_next = List ;
+             iterator->_prev = NULL ;
+             _EntryList = iterator ;
+        }
+     } else
+     if (Policy == 1) {      // append to EntryList
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+            // CONSIDER:  finding the tail currently requires a linear-time walk of
+            // the EntryList.  We can make tail access constant-time by converting to
+            // a CDLL instead of using our current DLL.
+            ObjectWaiter * Tail ;
+            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
+            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
+            Tail->_next = iterator ;
+            iterator->_prev = Tail ;
+            iterator->_next = NULL ;
+        }
+     } else
+     if (Policy == 2) {      // prepend to cxq
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+            iterator->TState = ObjectWaiter::TS_CXQ ;
+            for (;;) {
+                ObjectWaiter * Front = _cxq ;
+                iterator->_next = Front ;
+                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+                    break ;
+                }
+            }
+         }
+     } else
+     if (Policy == 3) {      // append to cxq
+        iterator->TState = ObjectWaiter::TS_CXQ ;
+        for (;;) {
+            ObjectWaiter * Tail ;
+            Tail = _cxq ;
+            if (Tail == NULL) {
+                iterator->_next = NULL ;
+                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+                   break ;
+                }
+            } else {
+                while (Tail->_next != NULL) Tail = Tail->_next ;
+                Tail->_next = iterator ;
+                iterator->_prev = Tail ;
+                iterator->_next = NULL ;
+                break ;
+            }
+        }
+     } else {
+        ParkEvent * ev = iterator->_event ;
+        iterator->TState = ObjectWaiter::TS_RUN ;
+        OrderAccess::fence() ;
+        ev->unpark() ;
+     }
+
+     if (Policy < 4) {
+       iterator->wait_reenter_begin(this);
+     }
+
+     // _WaitSetLock protects the wait queue, not the EntryList.  We could
+     // move the add-to-EntryList operation, above, outside the critical section
+     // protected by _WaitSetLock.  In practice that's not useful.  With the
+     // exception of  wait() timeouts and interrupts the monitor owner
+     // is the only thread that grabs _WaitSetLock.  There's almost no contention
+     // on _WaitSetLock so it's not profitable to reduce the length of the
+     // critical section.
+  }
+
+  Thread::SpinRelease (&_WaitSetLock) ;
+
+  if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
+     ObjectMonitor::_sync_Notifications->inc() ;
+  }
+}
+
+
+void ObjectMonitor::notifyAll(TRAPS) {
+  CHECK_OWNER();
+  ObjectWaiter* iterator;
+  if (_WaitSet == NULL) {
+      TEVENT (Empty-NotifyAll) ;
+      return ;
+  }
+  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
+
+  int Policy = Knob_MoveNotifyee ;
+  int Tally = 0 ;
+  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
+
+  for (;;) {
+     iterator = DequeueWaiter () ;
+     if (iterator == NULL) break ;
+     TEVENT (NotifyAll - Transfer1) ;
+     ++Tally ;
+
+     // Disposition - what might we do with iterator ?
+     // a.  add it directly to the EntryList - either tail or head.
+     // b.  push it onto the front of the _cxq.
+     // For now we use (a).
+
+     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
+     guarantee (iterator->_notified == 0, "invariant") ;
+     iterator->_notified = 1 ;
+     if (Policy != 4) {
+        iterator->TState = ObjectWaiter::TS_ENTER ;
+     }
+
+     ObjectWaiter * List = _EntryList ;
+     if (List != NULL) {
+        assert (List->_prev == NULL, "invariant") ;
+        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+        assert (List != iterator, "invariant") ;
+     }
+
+     if (Policy == 0) {       // prepend to EntryList
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+             List->_prev = iterator ;
+             iterator->_next = List ;
+             iterator->_prev = NULL ;
+             _EntryList = iterator ;
+        }
+     } else
+     if (Policy == 1) {      // append to EntryList
+         if (List == NULL) {
+             iterator->_next = iterator->_prev = NULL ;
+             _EntryList = iterator ;
+         } else {
+            // CONSIDER:  finding the tail currently requires a linear-time walk of
+            // the EntryList.  We can make tail access constant-time by converting to
+            // a CDLL instead of using our current DLL.
+            ObjectWaiter * Tail ;
+            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
+            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
+            Tail->_next = iterator ;
+            iterator->_prev = Tail ;
+            iterator->_next = NULL ;
+        }
+     } else
+     if (Policy == 2) {      // prepend to cxq
+         iterator->TState = ObjectWaiter::TS_CXQ ;
+         for (;;) {
+             ObjectWaiter * Front = _cxq ;
+             iterator->_next = Front ;
+             if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+                 break ;
+             }
+         }
+     } else
+     if (Policy == 3) {      // append to cxq
+        iterator->TState = ObjectWaiter::TS_CXQ ;
+        for (;;) {
+            ObjectWaiter * Tail ;
+            Tail = _cxq ;
+            if (Tail == NULL) {
+                iterator->_next = NULL ;
+                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+                   break ;
+                }
+            } else {
+                while (Tail->_next != NULL) Tail = Tail->_next ;
+                Tail->_next = iterator ;
+                iterator->_prev = Tail ;
+                iterator->_next = NULL ;
+                break ;
+            }
+        }
+     } else {
+        ParkEvent * ev = iterator->_event ;
+        iterator->TState = ObjectWaiter::TS_RUN ;
+        OrderAccess::fence() ;
+        ev->unpark() ;
+     }
+
+     if (Policy < 4) {
+       iterator->wait_reenter_begin(this);
+     }
+
+     // _WaitSetLock protects the wait queue, not the EntryList.  We could
+     // move the add-to-EntryList operation, above, outside the critical section
+     // protected by _WaitSetLock.  In practice that's not useful.  With the
+     // exception of  wait() timeouts and interrupts the monitor owner
+     // is the only thread that grabs _WaitSetLock.  There's almost no contention
+     // on _WaitSetLock so it's not profitable to reduce the length of the
+     // critical section.
+  }
+
+  Thread::SpinRelease (&_WaitSetLock) ;
+
+  if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
+     ObjectMonitor::_sync_Notifications->inc(Tally) ;
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Adaptive Spinning Support
+//
+// Adaptive spin-then-block - rational spinning
+//
+// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
+// algorithm.  On high order SMP systems it would be better to start with
+// a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
+// a contending thread could enqueue itself on the cxq and then spin locally
+// on a thread-specific variable such as its ParkEvent._Event flag.
+// That's left as an exercise for the reader.  Note that global spinning is
+// not problematic on Niagara, as the L2$ serves the interconnect and has both
+// low latency and massive bandwidth.
+//
+// Broadly, we can fix the spin frequency -- that is, the % of contended lock
+// acquisition attempts where we opt to spin --  at 100% and vary the spin count
+// (duration) or we can fix the count at approximately the duration of
+// a context switch and vary the frequency.   Of course we could also
+// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
+// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
+//
+// This implementation varies the duration "D", where D varies with
+// the success rate of recent spin attempts. (D is capped at approximately
+// the length of a round-trip context switch).  The success rate for recent
+// spin attempts is a good predictor of the success rate of future spin
+// attempts.  The mechanism adapts automatically to varying critical
+// section length (lock modality), system load and degree of parallelism.
+// D is maintained per-monitor in _SpinDuration and is initialized
+// optimistically.  Spin frequency is fixed at 100%.
+//
+// Note that _SpinDuration is volatile, but we update it without locks
+// or atomics.  The code is designed so that _SpinDuration stays within
+// a reasonable range even in the presence of races.  The arithmetic
+// operations on _SpinDuration are closed over the domain of legal values,
+// so at worst a race will install an older but still legal value.
+// At the very worst this introduces some apparent non-determinism.
+// We might spin when we shouldn't or vice-versa, but since the spin
+// counts are relatively short, even in the worst case, the effect is harmless.
+//
+// Care must be taken that a low "D" value does not become an
+// absorbing state.  Transient spinning failures -- when spinning
+// is overall profitable -- should not cause the system to converge
+// on low "D" values.  We want spinning to be stable and predictable
+// and fairly responsive to change and at the same time we don't want
+// it to oscillate, become metastable, be "too" non-deterministic,
+// or converge on or enter undesirable stable absorbing states.
+//
+// We implement a feedback-based control system -- using past behavior
+// to predict future behavior.  We face two issues: (a) if the
+// input signal is random then the spin predictor won't provide optimal
+// results, and (b) if the signal frequency is too high then the control
+// system, which has some natural response lag, will "chase" the signal.
+// (b) can arise from multimodal lock hold times.  Transient preemption
+// can also result in apparent bimodal lock hold times.
+// Although sub-optimal, neither condition is particularly harmful, as
+// in the worst-case we'll spin when we shouldn't or vice-versa.
+// The maximum spin duration is rather short so the failure modes aren't bad.
+// To be conservative, I've tuned the gain in the system to bias toward
+// _not spinning.  Relatedly, the system can sometimes enter a mode where it
+// "rings" or oscillates between spinning and not spinning.  This happens
+// when spinning is just on the cusp of profitability, however, so the
+// situation is not dire.  The state is benign -- there's no need to add
+// hysteresis control to damp the transition rate between spinning and
+// not spinning.
+//
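+// In outline -- an illustrative sketch only, using the Knob_* names that
+// TrySpin_VaryDuration() reads below -- the control law is roughly:
+//
+//   on a successful spin:   if (D < Knob_SpinLimit)
+//                               D = MAX2(D, Knob_Poverty) + Knob_Bonus ;
+//   on a failed spin:       D = MAX2(D - Knob_Penalty, 0) ;
+//
+// i.e., additive increase on success (applied only while D is below
+// Knob_SpinLimit) and additive decrease on failure, with Knob_Poverty acting
+// as a floor so that transient failures can't pin D near an absorbing zero.
+//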
+
+intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
+int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
+
+// Spinning: Fixed frequency (100%), vary duration
+
+
+int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
+
+    // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
+    int ctr = Knob_FixedSpin ;
+    if (ctr != 0) {
+        while (--ctr >= 0) {
+            if (TryLock (Self) > 0) return 1 ;
+            SpinPause () ;
+        }
+        return 0 ;
+    }
+
+    for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
+      if (TryLock(Self) > 0) {
+        // Increase _SpinDuration ...
+        // Note that we don't clamp SpinDuration precisely at SpinLimit.
+        // Raising _SpinDuration to the poverty line is key.
+        int x = _SpinDuration ;
+        if (x < Knob_SpinLimit) {
+           if (x < Knob_Poverty) x = Knob_Poverty ;
+           _SpinDuration = x + Knob_BonusB ;
+        }
+        return 1 ;
+      }
+      SpinPause () ;
+    }
+
+    // Admission control - verify preconditions for spinning
+    //
+    // We always spin a little bit, just to prevent _SpinDuration == 0 from
+    // becoming an absorbing state.  Put another way, we spin briefly to
+    // sample, just in case the system load, parallelism, contention, or lock
+    // modality changed.
+    //
+    // Consider the following alternative:
+    // Periodically set _SpinDuration = _SpinLimit and try a long/full
+    // spin attempt.  "Periodically" might mean after a tally of
+    // the # of failed spin attempts (or iterations) reaches some threshold.
+    // This takes us into the realm of 1-out-of-N spinning, where we
+    // hold the duration constant but vary the frequency.
+
+    ctr = _SpinDuration  ;
+    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
+    if (ctr <= 0) return 0 ;
+
+    if (Knob_SuccRestrict && _succ != NULL) return 0 ;
+    if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
+       TEVENT (Spin abort - notrunnable [TOP]);
+       return 0 ;
+    }
+
+    int MaxSpin = Knob_MaxSpinners ;
+    if (MaxSpin >= 0) {
+       if (_Spinner > MaxSpin) {
+          TEVENT (Spin abort -- too many spinners) ;
+          return 0 ;
+       }
+       // Slightly racy, but benign ...
+       Adjust (&_Spinner, 1) ;
+    }
+
+    // We're good to spin ... spin ingress.
+    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
+    // when preparing to LD...CAS _owner, etc and the CAS is likely
+    // to succeed.
+    int hits    = 0 ;
+    int msk     = 0 ;
+    int caspty  = Knob_CASPenalty ;
+    int oxpty   = Knob_OXPenalty ;
+    int sss     = Knob_SpinSetSucc ;
+    if (sss && _succ == NULL ) _succ = Self ;
+    Thread * prv = NULL ;
+
+    // There are three ways to exit the following loop:
+    // 1.  A successful spin where this thread has acquired the lock.
+    // 2.  Spin failure with prejudice
+    // 3.  Spin failure without prejudice
+
+    while (--ctr >= 0) {
+
+      // Periodic polling -- Check for pending GC
+      // Threads may spin while they're unsafe.
+      // We don't want spinning threads to delay the JVM from reaching
+      // a stop-the-world safepoint or to steal cycles from GC.
+      // If we detect a pending safepoint we abort in order that
+      // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
+      // this thread, if safe, doesn't steal cycles from GC.
+      // This is in keeping with the "no loitering in runtime" rule.
+      // We periodically check to see if there's a safepoint pending.
+      if ((ctr & 0xFF) == 0) {
+         if (SafepointSynchronize::do_call_back()) {
+            TEVENT (Spin: safepoint) ;
+            goto Abort ;           // abrupt spin egress
+         }
+         if (Knob_UsePause & 1) SpinPause () ;
+
+         int (*scb)(intptr_t,int) = SpinCallbackFunction ;
+         if (hits > 50 && scb != NULL) {
+            int abend = (*scb)(SpinCallbackArgument, 0) ;
+         }
+      }
+
+      if (Knob_UsePause & 2) SpinPause() ;
+
+      // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
+      // This is useful on classic SMP systems, but is of less utility on
+      // N1-style CMT platforms.
+      //
+      // Trade-off: lock acquisition latency vs coherency bandwidth.
+      // Lock hold times are typically short.  A histogram
+      // of successful spin attempts shows that we usually acquire
+      // the lock early in the spin.  That suggests we want to
+      // sample _owner frequently in the early phase of the spin,
+      // but then back-off and sample less frequently as the spin
+      // progresses.  The back-off makes us a good citizen on big
+      // SMP systems.  Oversampling _owner can consume excessive
+      // coherency bandwidth.  Relatedly, if we oversample _owner we
+      // can inadvertently interfere with the ST m->owner=null
+      // executed by the lock owner.
+      if (ctr & msk) continue ;
+      ++hits ;
+      if ((hits & 0xF) == 0) {
+        // The 0xF, above, corresponds to the exponent.
+        // Consider: (msk+1)|msk
+        msk = ((msk << 2)|3) & BackOffMask ;
+      }
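+      // For example, starting from msk == 0 every iteration probes _owner;
+      // after each block of 16 hits the mask grows 0 -> 0x3 -> 0xF -> 0x3F ...
+      // (capped by BackOffMask), so the "ctr & msk" filter above lets only
+      // one probe through per (msk+1) iterations as the spin progresses.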
+
+      // Probe _owner with TATAS
+      // If this thread observes the monitor transition or flicker
+      // from locked to unlocked to locked, then the odds that this
+      // thread will acquire the lock in this spin attempt go down
+      // considerably.  The same argument applies if the CAS fails
+      // or if we observe _owner change from one non-null value to
+      // another non-null value.   In such cases we might abort
+      // the spin without prejudice or apply a "penalty" to the
+      // spin count-down variable "ctr", reducing it by 100, say.
+
+      Thread * ox = (Thread *) _owner ;
+      if (ox == NULL) {
+         ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
+         if (ox == NULL) {
+            // The CAS succeeded -- this thread acquired ownership
+            // Take care of some bookkeeping to exit spin state.
+            if (sss && _succ == Self) {
+               _succ = NULL ;
+            }
+            if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
+
+            // Increase _SpinDuration :
+            // The spin was successful (profitable) so we tend toward
+            // longer spin attempts in the future.
+            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
+            // If we acquired the lock early in the spin cycle it
+            // makes sense to increase _SpinDuration proportionally.
+            // Note that we don't clamp SpinDuration precisely at SpinLimit.
+            int x = _SpinDuration ;
+            if (x < Knob_SpinLimit) {
+                if (x < Knob_Poverty) x = Knob_Poverty ;
+                _SpinDuration = x + Knob_Bonus ;
+            }
+            return 1 ;
+         }
+
+         // The CAS failed ... we can take any of the following actions:
+         // * penalize: ctr -= Knob_CASPenalty
+         // * exit spin with prejudice -- goto Abort;
+         // * exit spin without prejudice.
+         // * Since CAS is high-latency, retry again immediately.
+         prv = ox ;
+         TEVENT (Spin: cas failed) ;
+         if (caspty == -2) break ;
+         if (caspty == -1) goto Abort ;
+         ctr -= caspty ;
+         continue ;
+      }
+
+      // Did lock ownership change hands ?
+      if (ox != prv && prv != NULL ) {
+          TEVENT (spin: Owner changed) ;
+          if (oxpty == -2) break ;
+          if (oxpty == -1) goto Abort ;
+          ctr -= oxpty ;
+      }
+      prv = ox ;
+
+      // Abort the spin if the owner is not executing.
+      // The owner must be executing in order to drop the lock.
+      // Spinning while the owner is OFFPROC is idiocy.
+      // Consider: ctr -= RunnablePenalty ;
+      if (Knob_OState && NotRunnable (Self, ox)) {
+         TEVENT (Spin abort - notrunnable);
+         goto Abort ;
+      }
+      if (sss && _succ == NULL ) _succ = Self ;
+   }
+
+   // Spin failed with prejudice -- reduce _SpinDuration.
+   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
+   // AIMD is globally stable.
+   TEVENT (Spin failure) ;
+   {
+     int x = _SpinDuration ;
+     if (x > 0) {
+        // Consider an AIMD scheme like: x -= (x >> 3) + 100
+        // This is globally stable and tends to damp the response.
+        x -= Knob_Penalty ;
+        if (x < 0) x = 0 ;
+        _SpinDuration = x ;
+     }
+   }
+
+ Abort:
+   if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
+   if (sss && _succ == Self) {
+      _succ = NULL ;
+      // Invariant: after setting succ=null a contending thread
+      // must recheck-retry _owner before parking.  This usually happens
+      // in the normal usage of TrySpin(), but it's safest
+      // to make TrySpin() as foolproof as possible.
+      OrderAccess::fence() ;
+      if (TryLock(Self) > 0) return 1 ;
+   }
+   return 0 ;
+}
+
+// NotRunnable() -- informed spinning
+//
+// Don't bother spinning if the owner is not eligible to drop the lock.
+// Peek at the owner's schedctl.sc_state and Thread._thread_state and
+// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
+// The thread must be runnable in order to drop the lock in timely fashion.
+// If the _owner is not runnable then spinning will not likely be
+// successful (profitable).
+//
+// Beware -- the thread referenced by _owner could have died
+// so a simple fetch from _owner->_thread_state might trap.
+// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
+// Because of the lifecycle issues the schedctl and _thread_state values
+// observed by NotRunnable() might be garbage.  NotRunnable must
+// tolerate this and consider the observed _thread_state value
+// as advisory.
+//
+// Beware too, that _owner is sometimes a BasicLock address and sometimes
+// a thread pointer.  We differentiate the two cases with OwnerIsThread.
+// Alternately, we might tag the type (thread pointer vs basiclock pointer)
+// with the LSB of _owner.  Another option would be to probabilistically probe
+// the putative _owner->TypeTag value.
+//
+// Checking _thread_state isn't perfect.  Even if the thread is
+// in_java it might be blocked on a page-fault or have been preempted
+// and sitting on a ready/dispatch queue.  _thread_state in conjunction
+// with schedctl.sc_state gives us a good picture of what the
+// thread is doing, however.
+//
+// TODO: check schedctl.sc_state.
+// We'll need to use SafeFetch32() to read from the schedctl block.
+// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
+//
+// The return value from NotRunnable() is *advisory* -- the
+// result is based on sampling and is not necessarily coherent.
+// The caller must tolerate false-negative and false-positive errors.
+// Spinning, in general, is probabilistic anyway.
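+//
+// As a reminder of the SafeFetch contract assumed here: SafeFetch32(adr, errValue)
+// and SafeFetchN(adr, errValue) return *adr, or errValue if the load faults --
+// for instance because the owner thread has exited and its stack or TLS pages
+// have been unmapped.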
+
+
+int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
+    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
+    if (!OwnerIsThread) return 0 ;
+
+    if (ox == NULL) return 0 ;
+
+    // Avoid transitive spinning ...
+    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
+    // Immediately after T1 acquires L it's possible that T2, also
+    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
+    // This occurs transiently after T1 acquired L but before
+    // T1 managed to clear T1.Stalled.  T2 does not need to abort
+    // its spin in this circumstance.
+    intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
+
+    if (BlockedOn == 1) return 1 ;
+    if (BlockedOn != 0) {
+      return BlockedOn != intptr_t(this) && _owner == ox ;
+    }
+
+    assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
+    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
+    // consider also: jst != _thread_in_Java -- but that's overspecific.
+    return jst == _thread_blocked || jst == _thread_in_native ;
+}
+
+
+// -----------------------------------------------------------------------------
+// WaitSet management ...
+
+ObjectWaiter::ObjectWaiter(Thread* thread) {
+  _next     = NULL;
+  _prev     = NULL;
+  _notified = 0;
+  TState    = TS_RUN ;
+  _thread   = thread;
+  _event    = thread->_ParkEvent ;
+  _active   = false;
+  assert (_event != NULL, "invariant") ;
+}
+
+void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) {
+  JavaThread *jt = (JavaThread *)this->_thread;
+  _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
+}
+
+void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) {
+  JavaThread *jt = (JavaThread *)this->_thread;
+  JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
+}
+
+inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
+  assert(node != NULL, "should not dequeue NULL node");
+  assert(node->_prev == NULL, "node already in list");
+  assert(node->_next == NULL, "node already in list");
+  // put node at end of queue (circular doubly linked list)
+  if (_WaitSet == NULL) {
+    _WaitSet = node;
+    node->_prev = node;
+    node->_next = node;
+  } else {
+    ObjectWaiter* head = _WaitSet ;
+    ObjectWaiter* tail = head->_prev;
+    assert(tail->_next == head, "invariant check");
+    tail->_next = node;
+    head->_prev = node;
+    node->_next = head;
+    node->_prev = tail;
+  }
+}
+
+inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
+  // dequeue the very first waiter
+  ObjectWaiter* waiter = _WaitSet;
+  if (waiter) {
+    DequeueSpecificWaiter(waiter);
+  }
+  return waiter;
+}
+
+inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
+  assert(node != NULL, "should not dequeue NULL node");
+  assert(node->_prev != NULL, "node already removed from list");
+  assert(node->_next != NULL, "node already removed from list");
+  // when the waiter has woken up because of interrupt,
+  // timeout or other spurious wake-up, dequeue the
+  // waiter from waiting list
+  ObjectWaiter* next = node->_next;
+  if (next == node) {
+    assert(node->_prev == node, "invariant check");
+    _WaitSet = NULL;
+  } else {
+    ObjectWaiter* prev = node->_prev;
+    assert(prev->_next == node, "invariant check");
+    assert(next->_prev == node, "invariant check");
+    next->_prev = prev;
+    prev->_next = next;
+    if (_WaitSet == node) {
+      _WaitSet = next;
+    }
+  }
+  node->_next = NULL;
+  node->_prev = NULL;
+}
+
+// -----------------------------------------------------------------------------
+// PerfData support
+PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts       = NULL ;
+PerfCounter * ObjectMonitor::_sync_FutileWakeups               = NULL ;
+PerfCounter * ObjectMonitor::_sync_Parks                       = NULL ;
+PerfCounter * ObjectMonitor::_sync_EmptyNotifications          = NULL ;
+PerfCounter * ObjectMonitor::_sync_Notifications               = NULL ;
+PerfCounter * ObjectMonitor::_sync_PrivateA                    = NULL ;
+PerfCounter * ObjectMonitor::_sync_PrivateB                    = NULL ;
+PerfCounter * ObjectMonitor::_sync_SlowExit                    = NULL ;
+PerfCounter * ObjectMonitor::_sync_SlowEnter                   = NULL ;
+PerfCounter * ObjectMonitor::_sync_SlowNotify                  = NULL ;
+PerfCounter * ObjectMonitor::_sync_SlowNotifyAll               = NULL ;
+PerfCounter * ObjectMonitor::_sync_FailedSpins                 = NULL ;
+PerfCounter * ObjectMonitor::_sync_SuccessfulSpins             = NULL ;
+PerfCounter * ObjectMonitor::_sync_MonInCirculation            = NULL ;
+PerfCounter * ObjectMonitor::_sync_MonScavenged                = NULL ;
+PerfCounter * ObjectMonitor::_sync_Inflations                  = NULL ;
+PerfCounter * ObjectMonitor::_sync_Deflations                  = NULL ;
+PerfLongVariable * ObjectMonitor::_sync_MonExtant              = NULL ;
+
+// One-shot global initialization for the sync subsystem.
+// We could also defer initialization and initialize on-demand
+// the first time we call inflate().  Initialization would
+// be protected - like so many things - by the MonitorCache_lock.
+
+void ObjectMonitor::Initialize () {
+  static int InitializationCompleted = 0 ;
+  assert (InitializationCompleted == 0, "invariant") ;
+  InitializationCompleted = 1 ;
+  if (UsePerfData) {
+      EXCEPTION_MARK ;
+      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
+      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
+      NEWPERFCOUNTER(_sync_Inflations) ;
+      NEWPERFCOUNTER(_sync_Deflations) ;
+      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
+      NEWPERFCOUNTER(_sync_FutileWakeups) ;
+      NEWPERFCOUNTER(_sync_Parks) ;
+      NEWPERFCOUNTER(_sync_EmptyNotifications) ;
+      NEWPERFCOUNTER(_sync_Notifications) ;
+      NEWPERFCOUNTER(_sync_SlowEnter) ;
+      NEWPERFCOUNTER(_sync_SlowExit) ;
+      NEWPERFCOUNTER(_sync_SlowNotify) ;
+      NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
+      NEWPERFCOUNTER(_sync_FailedSpins) ;
+      NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
+      NEWPERFCOUNTER(_sync_PrivateA) ;
+      NEWPERFCOUNTER(_sync_PrivateB) ;
+      NEWPERFCOUNTER(_sync_MonInCirculation) ;
+      NEWPERFCOUNTER(_sync_MonScavenged) ;
+      NEWPERFVARIABLE(_sync_MonExtant) ;
+      #undef NEWPERFCOUNTER
+      #undef NEWPERFVARIABLE
+  }
+}
+
+
+// Compile-time asserts
+// When possible, it's better to catch errors deterministically at
+// compile-time than at runtime.  The down-side to using compile-time
+// asserts is that the error message -- often something about negative array
+// indices -- is opaque.
+
+#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
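+// If "x" is false the array dimension above evaluates to 1-(2*1) == -1 and the
+// declaration fails to compile; if "x" is true the dimension is 1 and the
+// printf merely keeps the otherwise-unused array referenced.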
+
+void ObjectMonitor::ctAsserts() {
+  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
+}
+
+
+static char * kvGet (char * kvList, const char * Key) {
+    if (kvList == NULL) return NULL ;
+    size_t n = strlen (Key) ;
+    char * Search ;
+    for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
+        if (strncmp (Search, Key, n) == 0) {
+            if (Search[n] == '=') return Search + n + 1 ;
+            if (Search[n] == 0)   return (char *) "1" ;
+        }
+    }
+    return NULL ;
+}
+
+static int kvGetInt (char * kvList, const char * Key, int Default) {
+    char * v = kvGet (kvList, Key) ;
+    int rslt = v ? ::strtol (v, NULL, 0) : Default ;
+    if (Knob_ReportSettings && v != NULL) {
+        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
+        ::fflush (stdout) ;
+    }
+    return rslt ;
+}
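+
+// For example (illustrative only): with, say, -XX:SyncKnobs="SpinLimit=2000:Verbose"
+// DeferredInitialize() below rewrites its private copy of the string to
+// "SpinLimit=2000\0Verbose\0\0", after which
+//   kvGetInt (knobs, "SpinLimit", Knob_SpinLimit)   returns 2000, and
+//   kvGetInt (knobs, "Verbose",   Knob_Verbose)     returns 1 (a bare key reads as "1").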
+
+void ObjectMonitor::DeferredInitialize () {
+  if (InitDone > 0) return ;
+  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
+      while (InitDone != 1) ;
+      return ;
+  }
+
+  // One-shot global initialization ...
+  // The initialization is idempotent, so we don't need locks.
+  // In the future consider doing this via os::init_2().
+  // SyncKnobs consist of <Key>=<Value> pairs in the style
+  // of environment variables.  Start by converting ':' to NUL.
+
+  if (SyncKnobs == NULL) SyncKnobs = "" ;
+
+  size_t sz = strlen (SyncKnobs) ;
+  char * knobs = (char *) malloc (sz + 2) ;
+  if (knobs == NULL) {
+     vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
+     guarantee (0, "invariant") ;
+  }
+  strcpy (knobs, SyncKnobs) ;
+  knobs[sz+1] = 0 ;
+  for (char * p = knobs ; *p ; p++) {
+     if (*p == ':') *p = 0 ;
+  }
+
+  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
+  SETKNOB(ReportSettings) ;
+  SETKNOB(Verbose) ;
+  SETKNOB(FixedSpin) ;
+  SETKNOB(SpinLimit) ;
+  SETKNOB(SpinBase) ;
+  SETKNOB(SpinBackOff);
+  SETKNOB(CASPenalty) ;
+  SETKNOB(OXPenalty) ;
+  SETKNOB(LogSpins) ;
+  SETKNOB(SpinSetSucc) ;
+  SETKNOB(SuccEnabled) ;
+  SETKNOB(SuccRestrict) ;
+  SETKNOB(Penalty) ;
+  SETKNOB(Bonus) ;
+  SETKNOB(BonusB) ;
+  SETKNOB(Poverty) ;
+  SETKNOB(SpinAfterFutile) ;
+  SETKNOB(UsePause) ;
+  SETKNOB(SpinEarly) ;
+  SETKNOB(OState) ;
+  SETKNOB(MaxSpinners) ;
+  SETKNOB(PreSpin) ;
+  SETKNOB(ExitPolicy) ;
+  SETKNOB(QMode);
+  SETKNOB(ResetEvent) ;
+  SETKNOB(MoveNotifyee) ;
+  SETKNOB(FastHSSEC) ;
+  #undef SETKNOB
+
+  if (os::is_MP()) {
+     BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
+     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
+     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
+  } else {
+     Knob_SpinLimit = 0 ;
+     Knob_SpinBase  = 0 ;
+     Knob_PreSpin   = 0 ;
+     Knob_FixedSpin = -1 ;
+  }
+
+  if (Knob_LogSpins == 0) {
+     ObjectMonitor::_sync_FailedSpins = NULL ;
+  }
+
+  free (knobs) ;
+  OrderAccess::fence() ;
+  InitDone = 1 ;
+}
+
+#ifndef PRODUCT
+void ObjectMonitor::verify() {
+}
+
+void ObjectMonitor::print() {
+}
+#endif
--- a/src/share/vm/runtime/objectMonitor.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/objectMonitor.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -22,6 +22,32 @@
  *
  */
 
+
+// ObjectWaiter serves as a "proxy" or surrogate thread.
+// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
+// ParkEvent instead.  Beware, however, that the JVMTI code
+// knows about ObjectWaiters, so we'll have to reconcile that code.
+// See next_waiter(), first_waiter(), etc.
+
+class ObjectWaiter : public StackObj {
+ public:
+  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
+  enum Sorted  { PREPEND, APPEND, SORTED } ;
+  ObjectWaiter * volatile _next;
+  ObjectWaiter * volatile _prev;
+  Thread*       _thread;
+  ParkEvent *   _event;
+  volatile int  _notified ;
+  volatile TStates TState ;
+  Sorted        _Sorted ;           // List placement disposition
+  bool          _active ;           // Contention monitoring is enabled
+ public:
+  ObjectWaiter(Thread* thread);
+
+  void wait_reenter_begin(ObjectMonitor *mon);
+  void wait_reenter_end(ObjectMonitor *mon);
+};
+
 // WARNING:
 //   This is a very sensitive and fragile class. DO NOT make any
 // change unless you are fully aware of the underlying semantics.
@@ -38,8 +64,6 @@
 // It is also used as RawMonitor by the JVMTI
 
 
-class ObjectWaiter;
-
 class ObjectMonitor {
  public:
   enum {
@@ -74,13 +98,16 @@
 
 
  public:
-  ObjectMonitor();
-  ~ObjectMonitor();
-
   markOop   header() const;
   void      set_header(markOop hdr);
 
-  intptr_t  is_busy() const;
+  intptr_t is_busy() const {
+    // TODO-FIXME: merge _count and _waiters.
+    // TODO-FIXME: assert _owner == null implies _recursions = 0
+    // TODO-FIXME: assert _WaitSet != null implies _count > 0
+    return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList) ;
+  }
+
   intptr_t  is_entered(Thread* current) const;
 
   void*     owner() const;
@@ -91,13 +118,58 @@
   intptr_t  count() const;
   void      set_count(intptr_t count);
   intptr_t  contentions() const ;
+  intptr_t  recursions() const                                         { return _recursions; }
 
   // JVM/DI GetMonitorInfo() needs this
-  Thread *  thread_of_waiter (ObjectWaiter *) ;
-  ObjectWaiter * first_waiter () ;
-  ObjectWaiter * next_waiter(ObjectWaiter* o);
+  ObjectWaiter* first_waiter()                                         { return _WaitSet; }
+  ObjectWaiter* next_waiter(ObjectWaiter* o)                           { return o->_next; }
+  Thread* thread_of_waiter(ObjectWaiter* o)                            { return o->_thread; }
+
+  // initialize the monitor; except for the semaphore, all other fields
+  // are simple integers or pointers
+  ObjectMonitor() {
+    _header       = NULL;
+    _count        = 0;
+    _waiters      = 0;
+    _recursions   = 0;
+    _object       = NULL;
+    _owner        = NULL;
+    _WaitSet      = NULL;
+    _WaitSetLock  = 0 ;
+    _Responsible  = NULL ;
+    _succ         = NULL ;
+    _cxq          = NULL ;
+    FreeNext      = NULL ;
+    _EntryList    = NULL ;
+    _SpinFreq     = 0 ;
+    _SpinClock    = 0 ;
+    OwnerIsThread = 0 ;
+  }
 
-  intptr_t  recursions() const { return _recursions; }
+  ~ObjectMonitor() {
+   // TODO: Add asserts ...
+   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+   // _count == 0 _EntryList  == NULL etc
+  }
+
+private:
+  void Recycle () {
+    // TODO: add stronger asserts ...
+    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+    // _count == 0 EntryList  == NULL
+    // _recursions == 0 _WaitSet == NULL
+    // TODO: assert (is_busy()|_recursions) == 0
+    _succ          = NULL ;
+    _EntryList     = NULL ;
+    _cxq           = NULL ;
+    _WaitSet       = NULL ;
+    _recursions    = 0 ;
+    _SpinFreq      = 0 ;
+    _SpinClock     = 0 ;
+    OwnerIsThread  = 0 ;
+  }
+
+public:
 
   void*     object() const;
   void*     object_addr();
@@ -122,22 +194,9 @@
   intptr_t  complete_exit(TRAPS);
   void      reenter(intptr_t recursions, TRAPS);
 
-  int       raw_enter(TRAPS);
-  int       raw_exit(TRAPS);
-  int       raw_wait(jlong millis, bool interruptable, TRAPS);
-  int       raw_notify(TRAPS);
-  int       raw_notifyAll(TRAPS);
-
  private:
-  // JVMTI support -- remove ASAP
-  int       SimpleEnter (Thread * Self) ;
-  int       SimpleExit  (Thread * Self) ;
-  int       SimpleWait  (Thread * Self, jlong millis) ;
-  int       SimpleNotify (Thread * Self, bool All) ;
-
- private:
-  void      Recycle () ;
   void      AddWaiter (ObjectWaiter * waiter) ;
+  static    void DeferredInitialize();
 
   ObjectWaiter * DequeueWaiter () ;
   void      DequeueSpecificWaiter (ObjectWaiter * waiter) ;
@@ -172,13 +231,17 @@
   // The VM assumes write ordering wrt these fields, which can be
   // read from other threads.
 
+ protected:                         // protected for jvmtiRawMonitor
   void *  volatile _owner;          // pointer to owning thread OR BasicLock
   volatile intptr_t  _recursions;   // recursion count, 0 for first entry
+ private:
   int OwnerIsThread ;               // _owner is (Thread *) vs SP/BasicLock
   ObjectWaiter * volatile _cxq ;    // LL of recently-arrived threads blocked on entry.
                                     // The list is actually composed of WaitNodes, acting
                                     // as proxies for Threads.
+ protected:
   ObjectWaiter * volatile _EntryList ;     // Threads blocked on entry or reentry.
+ private:
   Thread * volatile _succ ;          // Heir presumptive thread - used for futile wakeup throttling
   Thread * volatile _Responsible ;
   int _PromptDrain ;                // rqst to drain cxq into EntryList ASAP
@@ -196,8 +259,12 @@
   volatile intptr_t  _count;        // reference count to prevent reclaimation/deflation
                                     // at stop-the-world time.  See deflate_idle_monitors().
                                     // _count is approximately |_WaitSet| + |_EntryList|
+ protected:
   volatile intptr_t  _waiters;      // number of waiting threads
+ private:
+ protected:
   ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
+ private:
   volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
 
  public:
@@ -205,4 +272,37 @@
   ObjectMonitor * FreeNext ;        // Free list linkage
   intptr_t StatA, StatsB ;
 
+ public:
+  static void Initialize () ;
+  static PerfCounter * _sync_ContendedLockAttempts ;
+  static PerfCounter * _sync_FutileWakeups ;
+  static PerfCounter * _sync_Parks ;
+  static PerfCounter * _sync_EmptyNotifications ;
+  static PerfCounter * _sync_Notifications ;
+  static PerfCounter * _sync_SlowEnter ;
+  static PerfCounter * _sync_SlowExit ;
+  static PerfCounter * _sync_SlowNotify ;
+  static PerfCounter * _sync_SlowNotifyAll ;
+  static PerfCounter * _sync_FailedSpins ;
+  static PerfCounter * _sync_SuccessfulSpins ;
+  static PerfCounter * _sync_PrivateA ;
+  static PerfCounter * _sync_PrivateB ;
+  static PerfCounter * _sync_MonInCirculation ;
+  static PerfCounter * _sync_MonScavenged ;
+  static PerfCounter * _sync_Inflations ;
+  static PerfCounter * _sync_Deflations ;
+  static PerfLongVariable * _sync_MonExtant ;
+
+ public:
+  static int Knob_Verbose;
+  static int Knob_SpinLimit;
 };
+
+#undef TEVENT
+#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
+
+#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
+
+#undef  TEVENT
+#define TEVENT(nom) {;}
+
--- a/src/share/vm/runtime/objectMonitor.inline.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/objectMonitor.inline.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -104,7 +104,3 @@
   _count = 0;
 }
 
-
-// here are the platform-dependent bodies:
-
-# include "incls/_objectMonitor_pd.inline.hpp.incl"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/park.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+# include "incls/_precompiled.incl"
+# include "incls/_park.cpp.incl"
+
+
+// Lifecycle management for TSM ParkEvents.
+// ParkEvents are type-stable (TSM).
+// In our particular implementation they happen to be immortal.
+//
+// We manage concurrency on the FreeList with a CAS-based
+// detach-modify-reattach idiom that avoids the ABA problems
+// that would otherwise be present in a simple CAS-based
+// push-pop implementation.   (push-one and pop-all)
+//
+// Caveat: Allocate() and Release() may be called from threads
+// other than the thread associated with the Event!
+// If we need to call Allocate() when running as the thread in
+// question then look for the PD calls to initialize native TLS.
+// Native TLS (Win32/Linux/Solaris) can only be initialized or
+// accessed by the associated thread.
+// See also pd_initialize().
+//
+// Note that we could defer associating a ParkEvent with a thread
+// until the 1st time the thread calls park().  unpark() calls to
+// an unprovisioned thread would be ignored.  The first park() call
+// for a thread would allocate and associate a ParkEvent and return
+// immediately.
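+//
+// A minimal sketch of the detach-modify-reattach idiom used by Allocate(),
+// written against a hypothetical node type with a FreeNext link (illustrative
+// only, not compiled):
+//
+//   Node * DetachAll (Node * volatile * head) {          // "pop-all" in one CAS
+//     for (;;) {
+//       Node * list = *head ;
+//       if (list == NULL) return NULL ;
+//       if (Atomic::cmpxchg_ptr (NULL, head, list) == list) return list ;
+//     }
+//   }
+//
+//   void PushOne (Node * volatile * head, Node * n) {    // "push-one"
+//     for (;;) {
+//       Node * list = *head ;
+//       n->FreeNext = list ;
+//       if (Atomic::cmpxchg_ptr (n, head, list) == list) return ;
+//     }
+//   }
+//
+// Because a popper always takes the whole list, a node can never be recycled
+// and re-pushed "underneath" a competing pop of the same node -- the ABA
+// hazard of a naive CAS-based pop-one -- at the cost of having to reattach
+// any residual nodes afterwards.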
+
+volatile int ParkEvent::ListLock = 0 ;
+ParkEvent * volatile ParkEvent::FreeList = NULL ;
+
+ParkEvent * ParkEvent::Allocate (Thread * t) {
+  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
+  ParkEvent * ev ;
+
+  // Start by trying to recycle an existing but unassociated
+  // ParkEvent from the global free list.
+  for (;;) {
+    ev = FreeList ;
+    if (ev == NULL) break ;
+    // 1: Detach - sequester or privatize the list
+    // Tantamount to ev = Swap (&FreeList, NULL)
+    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
+       continue ;
+    }
+
+    // We've detached the list.  The list in-hand is now
+    // local to this thread.   This thread can operate on the
+    // list without risk of interference from other threads.
+    // 2: Extract -- pop the 1st element from the list.
+    ParkEvent * List = ev->FreeNext ;
+    if (List == NULL) break ;
+    for (;;) {
+        // 3: Try to reattach the residual list
+        guarantee (List != NULL, "invariant") ;
+        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
+        if (Arv == NULL) break ;
+
+        // New nodes arrived.  Try to detach the recent arrivals.
+        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
+            continue ;
+        }
+        guarantee (Arv != NULL, "invariant") ;
+        // 4: Merge Arv into List
+        ParkEvent * Tail = List ;
+        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
+        Tail->FreeNext = Arv ;
+    }
+    break ;
+  }
+
+  if (ev != NULL) {
+    guarantee (ev->AssociatedWith == NULL, "invariant") ;
+  } else {
+    // Do this the hard way -- materialize a new ParkEvent.
+    // In rare cases an allocating thread might detach a long list --
+    // installing null into FreeList -- and then stall or be obstructed.
+    // A 2nd thread calling Allocate() would see FreeList == null.
+    // The list held privately by the 1st thread is unavailable to the 2nd thread.
+    // In that case the 2nd thread would have to materialize a new ParkEvent,
+    // even though free ParkEvents existed in the system.  In this case we end up
+    // with more ParkEvents in circulation than we need, but the race is
+    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
+    // is equal to the maximum # of threads that existed at any one time.
+    // Because of the race mentioned above, segments of the freelist
+    // can be transiently inaccessible.  At worst we may end up with the
+    // # of ParkEvents in circulation slightly above the ideal.
+    // Note that if we didn't have the TSM/immortal constraint, then
+    // when reattaching, above, we could trim the list.
+    ev = new ParkEvent () ;
+    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
+  }
+  ev->reset() ;                     // courtesy to caller
+  ev->AssociatedWith = t ;          // Associate ev with t
+  ev->FreeNext       = NULL ;
+  return ev ;
+}
+
+void ParkEvent::Release (ParkEvent * ev) {
+  if (ev == NULL) return ;
+  guarantee (ev->FreeNext == NULL      , "invariant") ;
+  ev->AssociatedWith = NULL ;
+  for (;;) {
+    // Push ev onto FreeList
+    // The mechanism is "half" lock-free.
+    ParkEvent * List = FreeList ;
+    ev->FreeNext = List ;
+    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+  }
+}
+
+// Override operator new and delete so we can ensure that the
+// least significant byte of ParkEvent addresses is 0.
+// Beware that excessive address alignment is undesirable
+// as it can result in D$ index usage imbalance as
+// well as bank access imbalance on Niagara-like platforms,
+// although Niagara's hash function should help.
+
+void * ParkEvent::operator new (size_t sz) {
+  return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
+}
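+// For example, if CHeapObj::operator new were to return an address ending in
+// 0x...1234 the expression above yields 0x...1300: adding 256 and masking with
+// -256 rounds up to the next 256-byte boundary inside the (sz + 256)-byte
+// allocation, so the low byte of every ParkEvent address is 0.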
+
+void ParkEvent::operator delete (void * a) {
+  // ParkEvents are type-stable and immortal ...
+  ShouldNotReachHere();
+}
+
+
+// 6399321 As a temporary measure we copied & modified the ParkEvent::
+// allocate() and release() code for use by Parkers.  The Parker:: forms
+// will eventually be removed as we consolidate and shift over to ParkEvents
+// for both builtin synchronization and JSR166 operations.
+
+volatile int Parker::ListLock = 0 ;
+Parker * volatile Parker::FreeList = NULL ;
+
+Parker * Parker::Allocate (JavaThread * t) {
+  guarantee (t != NULL, "invariant") ;
+  Parker * p ;
+
+  // Start by trying to recycle an existing but unassociated
+  // Parker from the global free list.
+  for (;;) {
+    p = FreeList ;
+    if (p  == NULL) break ;
+    // 1: Detach
+    // Tantamount to p = Swap (&FreeList, NULL)
+    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
+       continue ;
+    }
+
+    // We've detached the list.  The list in-hand is now
+    // local to this thread.   This thread can operate on the
+    // list without risk of interference from other threads.
+    // 2: Extract -- pop the 1st element from the list.
+    Parker * List = p->FreeNext ;
+    if (List == NULL) break ;
+    for (;;) {
+        // 3: Try to reattach the residual list
+        guarantee (List != NULL, "invariant") ;
+        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
+        if (Arv == NULL) break ;
+
+        // New nodes arrived.  Try to detach the recent arrivals.
+        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
+            continue ;
+        }
+        guarantee (Arv != NULL, "invariant") ;
+        // 4: Merge Arv into List
+        Parker * Tail = List ;
+        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
+        Tail->FreeNext = Arv ;
+    }
+    break ;
+  }
+
+  if (p != NULL) {
+    guarantee (p->AssociatedWith == NULL, "invariant") ;
+  } else {
+    // Do this the hard way -- materialize a new Parker.
+    // In rare cases an allocating thread might detach
+    // a long list -- installing null into FreeList -- and
+    // then stall.  Another thread calling Allocate() would see
+    // FreeList == null and then invoke the ctor.  In this case we
+    // end up with more Parkers in circulation than we need, but
+    // the race is rare and the outcome is benign.
+    // Ideally, the # of extant Parkers is equal to the
+    // maximum # of threads that existed at any one time.
+    // Because of the race mentioned above, segments of the
+    // freelist can be transiently inaccessible.  At worst
+    // we may end up with the # of Parkers in circulation
+    // slightly above the ideal.
+    p = new Parker() ;
+  }
+  p->AssociatedWith = t ;          // Associate p with t
+  p->FreeNext       = NULL ;
+  return p ;
+}
+
+
+void Parker::Release (Parker * p) {
+  if (p == NULL) return ;
+  guarantee (p->AssociatedWith != NULL, "invariant") ;
+  guarantee (p->FreeNext == NULL      , "invariant") ;
+  p->AssociatedWith = NULL ;
+  for (;;) {
+    // Push p onto FreeList
+    Parker * List = FreeList ;
+    p->FreeNext = List ;
+    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/park.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+/*
+ * Per-thread blocking support for JSR166. See the Java-level
+ * Documentation for rationale. Basically, park acts like wait, unpark
+ * like notify.
+ *
+ * 6271289 --
+ * To avoid errors where an os thread expires but the JavaThread still
+ * exists, Parkers are immortal (type-stable) and are recycled across
+ * new threads.  This parallels the ParkEvent implementation.
+ * Because park-unpark allow spurious wakeups it is harmless if an
+ * unpark call unparks a new thread using the old Parker reference.
+ *
+ * In the future we'll want to think about eliminating Parker and using
+ * ParkEvent instead.  There's considerable duplication between the two
+ * services.
+ *
+ */
+
+class Parker : public os::PlatformParker {
+private:
+  volatile int _counter ;
+  Parker * FreeNext ;
+  JavaThread * AssociatedWith ; // Current association
+
+public:
+  Parker() : PlatformParker() {
+    _counter       = 0 ;
+    FreeNext       = NULL ;
+    AssociatedWith = NULL ;
+  }
+protected:
+  ~Parker() { ShouldNotReachHere(); }
+public:
+  // For simplicity of interface with Java, all forms of park (indefinite,
+  // relative, and absolute) are multiplexed into one call.
+  void park(bool isAbsolute, jlong time);
+  void unpark();
+
+  // Lifecycle operators
+  static Parker * Allocate (JavaThread * t) ;
+  static void Release (Parker * e) ;
+private:
+  static Parker * volatile FreeList ;
+  static volatile int ListLock ;
+
+};
+
+/////////////////////////////////////////////////////////////
+//
+// ParkEvents are type-stable and immortal.
+//
+// Lifecycle: Once a ParkEvent is associated with a thread, that ParkEvent remains
+// associated with the thread for the thread's entire lifetime - the relationship is
+// stable.  A thread will be associated with at most one ParkEvent.  When the thread
+// expires, the ParkEvent moves to the EventFreeList.  New threads attempt to allocate from
+// the EventFreeList before creating a new Event.  Type-stability frees us from
+// worrying about stale Event or Thread references in the objectMonitor subsystem.
+// (A reference to ParkEvent is always valid, even though the event may no longer be associated
+// with the desired or expected thread.  A key aspect of this design is that the callers of
+// park, unpark, etc must tolerate stale references and spurious wakeups).
+//
+// Only the "associated" thread can block (park) on the ParkEvent, although
+// any other thread can unpark a reachable parkevent.  Park() is allowed to
+// return spuriously.  In fact park-unpark is really just an optimization to
+// avoid unbounded spinning and to surrender the CPU so as to be a polite system citizen.
+// A degenerate albeit "impolite" park-unpark implementation could simply return.
+// See http://blogs.sun.com/dave for more details.
+//
+// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
+// thread proxies, and simply make the THREAD structure type-stable and persistent.
+// Currently, we unpark events associated with threads, but ideally we'd just
+// unpark threads.
+//
+// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
+// platform-independent.  PlatformEvent provides park(), unpark(), etc., and
+// is abstract -- that is, a PlatformEvent should never be instantiated except
+// as part of a ParkEvent.
+// Equivalently we could have defined a platform-independent base-class that
+// exported Allocate(), Release(), etc.  The platform-specific class would extend
+// that base-class, adding park(), unpark(), etc.
+//
+// A word of caution: The JVM uses 2 very similar constructs:
+// 1. ParkEvents are used for Java-level "monitor" synchronization.
+// 2. Parkers are used by JSR166-JUC park-unpark.
+//
+// We'll want to eventually merge these redundant facilities and use ParkEvent.
+
+
+class ParkEvent : public os::PlatformEvent {
+  private:
+    ParkEvent * FreeNext ;
+
+    // Current association
+    Thread * AssociatedWith ;
+    intptr_t RawThreadIdentity ;        // LWPID etc
+    volatile int Incarnation ;
+
+    // diagnostic : keep track of last thread to wake this thread.
+    // this is useful for construction of dependency graphs.
+    void * LastWaker ;
+
+  public:
+    // MCS-CLH list linkage and Native Mutex/Monitor
+    ParkEvent * volatile ListNext ;
+    ParkEvent * volatile ListPrev ;
+    volatile intptr_t OnList ;
+    volatile int TState ;
+    volatile int Notified ;             // for native monitor construct
+    volatile int IsWaiting ;            // Enqueued on WaitSet
+
+
+  private:
+    static ParkEvent * volatile FreeList ;
+    static volatile int ListLock ;
+
+    // It's prudent to mark the dtor as "private"
+    // ensuring that it's not visible outside the package.
+    // Unfortunately gcc warns about such usage, so
+    // we revert to the less desirable "protected" visibility.
+    // The other compilers accept private dtors.
+
+  protected:        // Ensure dtor is never invoked
+    ~ParkEvent() { guarantee (0, "invariant") ; }
+
+    ParkEvent() : PlatformEvent() {
+       AssociatedWith = NULL ;
+       FreeNext       = NULL ;
+       ListNext       = NULL ;
+       ListPrev       = NULL ;
+       OnList         = 0 ;
+       TState         = 0 ;
+       Notified       = 0 ;
+       IsWaiting      = 0 ;
+    }
+
+    // We use placement-new to force ParkEvent instances to be
+    // aligned on 256-byte address boundaries.  This ensures that the least
+    // significant byte of a ParkEvent address is always 0.
+
+    void * operator new (size_t sz) ;
+    void operator delete (void * a) ;
+
+  public:
+    static ParkEvent * Allocate (Thread * t) ;
+    static void Release (ParkEvent * e) ;
+} ;
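+
+// Typical usage (an illustrative sketch -- the real call sites live in the
+// Thread lifecycle code): a thread provisions its event with
+//     _ParkEvent = ParkEvent::Allocate (this) ;
+// and, when the thread terminates, returns it to the free list with
+//     ParkEvent::Release (_ParkEvent) ; _ParkEvent = NULL ;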
--- a/src/share/vm/runtime/relocator.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/relocator.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -435,6 +435,120 @@
   }
 }
 
+// Create a new array, copying the src array but adding a hole at
+// the specified location
+static typeArrayOop insert_hole_at(
+    size_t where, int hole_sz, typeArrayOop src) {
+  Thread* THREAD = Thread::current();
+  Handle src_hnd(THREAD, src);
+  typeArrayOop dst =
+      oopFactory::new_permanent_byteArray(src->length() + hole_sz, CHECK_NULL);
+  src = (typeArrayOop)src_hnd();
+
+  address src_addr = (address)src->byte_at_addr(0);
+  address dst_addr = (address)dst->byte_at_addr(0);
+
+  memcpy(dst_addr, src_addr, where);
+  memcpy(dst_addr + where + hole_sz,
+         src_addr + where, src->length() - where);
+  return dst;
+}
+
+// The width of instruction at "bci" is changing by "delta".  Adjust the stack
+// map frames.
+void Relocator::adjust_stack_map_table(int bci, int delta) {
+  if (method()->has_stackmap_table()) {
+    typeArrayOop data = method()->stackmap_data();
+    // The data in the array is a classfile representation of the stackmap
+    // table attribute, less the initial u2 tag and u4 attribute_length fields.
+    stack_map_table_attribute* attr = stack_map_table_attribute::at(
+        (address)data->byte_at_addr(0) - (sizeof(u2) + sizeof(u4)));
+
+    int count = attr->number_of_entries();
+    stack_map_frame* frame = attr->entries();
+    int bci_iter = -1;
+    bool offset_adjusted = false; // only need to adjust one offset
+
+    for (int i = 0; i < count; ++i) {
+      int offset_delta = frame->offset_delta();
+      bci_iter += offset_delta;
+
+      if (!offset_adjusted && bci_iter > bci) {
+        int new_offset_delta = offset_delta + delta;
+
+        if (frame->is_valid_offset(new_offset_delta)) {
+          frame->set_offset_delta(new_offset_delta);
+        } else {
+          assert(frame->is_same_frame() ||
+                 frame->is_same_frame_1_stack_item_frame(),
+                 "Frame must be one of the compressed forms");
+          // The new delta exceeds the capacity of the 'same_frame' or
+          // 'same_frame_1_stack_item_frame' frame types.  We need to
+          // convert these frames to the extended versions, but the extended
+          // version is bigger and requires more room.  So we allocate a
+          // new array and copy the data, being sure to leave u2-sized hole
+          // right after the 'frame_type' for the new offset field.
+          //
+          // We can safely ignore the reverse situation as a small delta
+          // can still be used in an extended version of the frame.
+
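+          // For example, a 'same_frame' entry is a single tag byte (tags
+          // 0..63 encode the offset_delta directly); once the delta exceeds
+          // 63 it must become a 'same_frame_extended' entry -- a tag byte
+          // followed by an explicit u2 offset_delta -- which is exactly the
+          // two extra bytes inserted after the frame_type below.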
+          size_t frame_offset = (address)frame - (address)data->byte_at_addr(0);
+
+          data = insert_hole_at(frame_offset + 1, 2, data);
+          if (data == NULL) {
+            return; // out-of-memory?
+          }
+
+          address frame_addr = (address)(data->byte_at_addr(0) + frame_offset);
+          frame = stack_map_frame::at(frame_addr);
+
+
+          // Now convert the frames in place
+          if (frame->is_same_frame()) {
+            same_frame_extended::create_at(frame_addr, new_offset_delta);
+          } else {
+            same_frame_1_stack_item_extended::create_at(
+              frame_addr, new_offset_delta, NULL);
+            // the verification_info_type should already be at the right spot
+          }
+        }
+        offset_adjusted = true; // needs to be done only once, since subsequent
+                                // values are offsets from the current
+      }
+
+      // The stack map frame may contain verification types, if so we need to
+      // check and update any Uninitialized type's bci (no matter where it is).
+      int number_of_types = frame->number_of_types();
+      verification_type_info* types = frame->types();
+
+      for (int i = 0; i < number_of_types; ++i) {
+        if (types->is_uninitialized() && types->bci() > bci) {
+          types->set_bci(types->bci() + delta);
+        }
+        types = types->next();
+      }
+
+      // Full frame has stack values too
+      full_frame* ff = frame->as_full_frame();
+      if (ff != NULL) {
+        address eol = (address)types;
+        number_of_types = ff->stack_slots(eol);
+        types = ff->stack(eol);
+        for (int i = 0; i < number_of_types; ++i) {
+          if (types->is_uninitialized() && types->bci() > bci) {
+            types->set_bci(types->bci() + delta);
+          }
+          types = types->next();
+        }
+      }
+
+      frame = frame->next();
+    }
+
+    method()->set_stackmap_data(data); // in case it has changed
+  }
+}
+
 
 bool Relocator::expand_code_array(int delta) {
   int length = MAX2(code_length() + delta, code_length() * (100+code_slop_pct()) / 100);
@@ -499,6 +613,9 @@
   // And local variable table...
   adjust_local_var_table(bci, delta);
 
+  // Adjust stack maps
+  adjust_stack_map_table(bci, delta);
+
   // Relocate the pending change stack...
   for (int j = 0; j < _changes->length(); j++) {
     ChangeItem* ci = _changes->at(j);
@@ -641,6 +758,7 @@
       memmove(addr_at(bci +1 + new_pad),
               addr_at(bci +1 + old_pad),
               len * 4);
+      memset(addr_at(bci + 1), 0, new_pad); // pad must be 0
     }
   }
   return true;
--- a/src/share/vm/runtime/relocator.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/relocator.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -105,6 +105,7 @@
   void adjust_exception_table(int bci, int delta);
   void adjust_line_no_table  (int bci, int delta);
   void adjust_local_var_table(int bci, int delta);
+  void adjust_stack_map_table(int bci, int delta);
   int  get_orig_switch_pad   (int bci, bool is_lookup_switch);
   int  rc_instr_len          (int bci);
   bool expand_code_array     (int delta);
--- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -302,6 +302,9 @@
   return (f <= (double)0.0) ? (double)0.0 - f : f;
 }
 
+#endif
+
+#if defined(__SOFTFP__) || defined(PPC)
 double SharedRuntime::dsqrt(double f) {
   return sqrt(f);
 }
--- a/src/share/vm/runtime/sharedRuntime.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -116,6 +116,9 @@
 
 #if defined(__SOFTFP__) || defined(E500V2)
   static double dabs(double f);
+#endif
+
+#if defined(__SOFTFP__) || defined(PPC)
   static double dsqrt(double f);
 #endif
 
--- a/src/share/vm/runtime/synchronizer.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/synchronizer.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -32,15 +32,12 @@
   #define ATTR
 #endif
 
-// Native markword accessors for synchronization and hashCode().
-//
 // The "core" versions of monitor enter and exit reside in this file.
 // The interpreter and compilers contain specialized transliterated
 // variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
 // for instance.  If you make changes here, make sure to modify the
 // interpreter, and both C1 and C2 fast-path inline locking code emission.
 //
-// TODO: merge the objectMonitor and synchronizer classes.
 //
 // -----------------------------------------------------------------------------
 
@@ -53,16 +50,6 @@
   jlong, uintptr_t, char*, int, long);
 HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
   jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
-  jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
-  jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
-  jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
-  jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
-  jlong, uintptr_t, char*, int);
 
 #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
   char* bytes = NULL;                                                      \
@@ -99,61 +86,300 @@
 
 #endif // ndef DTRACE_ENABLED
 
-// ObjectWaiter serves as a "proxy" or surrogate thread.
-// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
-// ParkEvent instead.  Beware, however, that the JVMTI code
-// knows about ObjectWaiters, so we'll have to reconcile that code.
-// See next_waiter(), first_waiter(), etc.
+// This exists only as a workaround for dtrace bug 6254741
+int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
+  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
+  return 0;
+}
+
+#define NINFLATIONLOCKS 256
+static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
+
+ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
+ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
+ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
+int ObjectSynchronizer::gOmInUseCount = 0;
+static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
+static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
+static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
+#define CHAINMARKER ((oop)-1)
+
+// -----------------------------------------------------------------------------
+//  Fast Monitor Enter/Exit
+// This is the fast monitor enter. The interpreter and compilers use
+// assembly copies of this code. Make sure to update that code
+// if the following function is changed. The implementation is
+// extremely sensitive to race conditions. Be careful.
+
+void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
+ if (UseBiasedLocking) {
+    if (!SafepointSynchronize::is_at_safepoint()) {
+      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
+      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
+        return;
+      }
+    } else {
+      assert(!attempt_rebias, "can not rebias toward VM thread");
+      BiasedLocking::revoke_at_safepoint(obj);
+    }
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+
+ slow_enter (obj, lock, THREAD) ;
+}
+
+void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
+  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
+  // If the displaced header is null, the previous enter was recursive; this exit is a no-op.
+  markOop dhw = lock->displaced_header();
+  markOop mark ;
+  if (dhw == NULL) {
+     // Recursive stack-lock.
+     // Diagnostics -- Could be: stack-locked, inflating, inflated.
+     mark = object->mark() ;
+     assert (!mark->is_neutral(), "invariant") ;
+     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
+        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
+     }
+     if (mark->has_monitor()) {
+        ObjectMonitor * m = mark->monitor() ;
+        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
+        assert(m->is_entered(THREAD), "invariant") ;
+     }
+     return ;
+  }
+
+  mark = object->mark() ;
 
-class ObjectWaiter : public StackObj {
- public:
-  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
-  enum Sorted  { PREPEND, APPEND, SORTED } ;
-  ObjectWaiter * volatile _next;
-  ObjectWaiter * volatile _prev;
-  Thread*       _thread;
-  ParkEvent *   _event;
-  volatile int  _notified ;
-  volatile TStates TState ;
-  Sorted        _Sorted ;           // List placement disposition
-  bool          _active ;           // Contention monitoring is enabled
- public:
-  ObjectWaiter(Thread* thread) {
-    _next     = NULL;
-    _prev     = NULL;
-    _notified = 0;
-    TState    = TS_RUN ;
-    _thread   = thread;
-    _event    = thread->_ParkEvent ;
-    _active   = false;
-    assert (_event != NULL, "invariant") ;
+  // If the object is stack-locked by the current thread, try to
+  // swing the displaced header from the box back to the mark.
+  if (mark == (markOop) lock) {
+     assert (dhw->is_neutral(), "invariant") ;
+     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
+        TEVENT (fast_exit: release stacklock) ;
+        return;
+     }
+  }
+
+  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
+}
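For orientation, here is a minimal sketch (not part of the changeset) of how a VM-internal
caller would pair these two entry points around a stack-allocated BasicLock, assuming HotSpot's
internal headers; generated interpreter/compiler code does the equivalent with the BasicLock
living in the Java frame.

// Hypothetical caller, for illustration only: enter and exit bracket a
// critical section, with the BasicLock on the native stack just as it is
// inside ObjectLocker further down in this file.
static void locked_region_sketch(Handle h_obj, TRAPS) {
  BasicLock lock;
  ObjectSynchronizer::fast_enter(h_obj, &lock, true /* attempt_rebias */, THREAD);
  // ... critical section operating on h_obj ...
  ObjectSynchronizer::fast_exit(h_obj(), &lock, THREAD);
}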
+
+// -----------------------------------------------------------------------------
+// Interpreter/Compiler Slow Case
+// This routine handles the interpreter/compiler slow case.
+// We don't need to use the fast path here, because it must have
+// already failed in the interpreter/compiler code.
+void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
+  markOop mark = obj->mark();
+  assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+
+  if (mark->is_neutral()) {
+    // Anticipate successful CAS -- the ST of the displaced mark must
+    // be visible <= the ST performed by the CAS.
+    lock->set_displaced_header(mark);
+    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
+      TEVENT (slow_enter: release stacklock) ;
+      return ;
+    }
+    // Fall through to inflate() ...
+  } else
+  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+    assert(lock != mark->locker(), "must not re-lock the same lock");
+    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
+    lock->set_displaced_header(NULL);
+    return;
+  }
+
+#if 0
+  // The following optimization isn't particularly useful.
+  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
+    lock->set_displaced_header (NULL) ;
+    return ;
+  }
+#endif
+
+  // The object header will never be displaced to this lock,
+  // so it does not matter what the value is, except that it
+  // must be non-zero to avoid looking like a re-entrant lock,
+  // and must not look locked either.
+  lock->set_displaced_header(markOopDesc::unused_mark());
+  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
+}
+
+// This routine handles the interpreter/compiler slow case.
+// We don't need to use the fast path here, because it must have
+// already failed in the interpreter/compiler code. Simply using the
+// heavyweight monitor should be OK, unless someone finds otherwise.
+void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
+  fast_exit (object, lock, THREAD) ;
+}
+
+// -----------------------------------------------------------------------------
+// Class Loader support to work around deadlocks on the class loader lock objects
+// Also used by GC
+// complete_exit()/reenter() are used to wait on a nested lock
+// i.e. to give up an outer lock completely and then re-enter
+// Used when holding nested locks - lock acquisition order: lock1 then lock2
+//  1) complete_exit lock1 - saving recursion count
+//  2) wait on lock2
+//  3) when notified on lock2, unlock lock2
+//  4) reenter lock1 with original recursion count
+//  5) lock lock2
+// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
+intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
+  TEVENT (complete_exit) ;
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
 
-  void wait_reenter_begin(ObjectMonitor *mon) {
-    JavaThread *jt = (JavaThread *)this->_thread;
-    _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
+
+  return monitor->complete_exit(THREAD);
+}
+
+// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
+void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
+  TEVENT (reenter) ;
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
+
+  monitor->reenter(recursion, THREAD);
+}
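A minimal sketch (illustrative only, assuming the declarations above) of the bracket that
complete_exit()/reenter() provide in the five-step protocol described before complete_exit():

// Hypothetical helper: give up 'outer' completely no matter how many times
// it has been recursively entered (step 1), do the inner-lock work, then
// restore the saved recursion count (step 4).  Steps 2/3/5 concern the
// second lock and are elided here.
static void release_outer_then_restore(Handle outer, TRAPS) {
  intptr_t recursions = ObjectSynchronizer::complete_exit(outer, THREAD);
  // ... wait on / lock / unlock the inner object here ...
  ObjectSynchronizer::reenter(outer, recursions, THREAD);
}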
+// -----------------------------------------------------------------------------
+// JNI locks on java objects
+// NOTE: must use heavy weight monitor to handle jni monitor enter
+void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
+  // the current locking is from JNI instead of Java code
+  TEVENT (jni_enter) ;
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+  THREAD->set_current_pending_monitor_is_from_java(false);
+  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
+  THREAD->set_current_pending_monitor_is_from_java(true);
+}
+
+// NOTE: must use heavy weight monitor to handle jni monitor enter
+bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
 
-  void wait_reenter_end(ObjectMonitor *mon) {
-    JavaThread *jt = (JavaThread *)this->_thread;
-    JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
+  return monitor->try_enter(THREAD);
+}
+
+
+// NOTE: must use heavy weight monitor to handle jni monitor exit
+void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
+  TEVENT (jni_exit) ;
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+  }
+  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
+  // If this thread has locked the object, exit the monitor.  Note:  can't use
+  // monitor->check(CHECK); must exit even if an exception is pending.
+  if (monitor->check(THREAD)) {
+     monitor->exit(THREAD);
   }
-};
+}
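These two routines are the kind of entry points a JNI-level MonitorEnter/MonitorExit
implementation would call; a rough, hypothetical caller is sketched below (the real JNI code
lives elsewhere and adds argument checking and error reporting).

// Hypothetical JNI-style pairing, for illustration only.  jni_enter() always
// works on the inflated monitor; jni_exit() exits only if the ownership
// check passes, per the comment in jni_exit() above.
static void jni_lock_region_sketch(Handle h_obj, TRAPS) {
  ObjectSynchronizer::jni_enter(h_obj, THREAD);
  // ... native code touching the object ...
  ObjectSynchronizer::jni_exit(h_obj(), THREAD);
}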
+
+// -----------------------------------------------------------------------------
+// Internal VM locks on java objects
+// standard constructor, allows locking failures
+ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
+  _dolock = doLock;
+  _thread = thread;
+  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
+  _obj = obj;
 
-enum ManifestConstants {
-    ClearResponsibleAtSTW   = 0,
-    MaximumRecheckInterval  = 1000
-} ;
+  if (_dolock) {
+    TEVENT (ObjectLocker) ;
+
+    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
+  }
+}
+
+ObjectLocker::~ObjectLocker() {
+  if (_dolock) {
+    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
+  }
+}
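A tiny usage sketch of the RAII wrapper above (hypothetical VM-internal code): the constructor
performs fast_enter() and the destructor performs fast_exit() when the locker goes out of scope.

// Hypothetical use of ObjectLocker, for illustration only.
static void with_object_locked_sketch(Handle h_obj, Thread* thread) {
  ObjectLocker ol(h_obj, thread, true /* doLock */);
  // ... operate on h_obj while its monitor is held ...
}   // monitor released here by ~ObjectLocker(), even on early return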
 
 
-#undef TEVENT
-#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
+// -----------------------------------------------------------------------------
+//  Wait/Notify/NotifyAll
+// NOTE: must use heavy weight monitor to handle wait()
+void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+  if (millis < 0) {
+    TEVENT (wait - throw IAX) ;
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+  }
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
+  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
+  monitor->wait(millis, true, THREAD);
+
+  /* This dummy call is in place to get around dtrace bug 6254741.  Once
+     that's fixed we can uncomment the following line and remove the call */
+  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
+  dtrace_waited_probe(monitor, obj, THREAD);
+}
 
-#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
+void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+  if (millis < 0) {
+    TEVENT (wait - throw IAX) ;
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+  }
+  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
+}
+
+void ObjectSynchronizer::notify(Handle obj, TRAPS) {
+ if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
 
-#undef  TEVENT
-#define TEVENT(nom) {;}
+  markOop mark = obj->mark();
+  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+    return;
+  }
+  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
+}
 
+// NOTE: see the comment at notify()
+void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  markOop mark = obj->mark();
+  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+    return;
+  }
+  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
+}
+
+// -----------------------------------------------------------------------------
+// Hash Code handling
+//
 // Performance concern:
 // OrderAccess::storestore() calls release() which STs 0 into the global volatile
 // OrderAccess::Dummy variable.  This store is unnecessary for correctness.
@@ -188,44 +414,73 @@
 static int MonitorScavengeThreshold = 1000000 ;
 static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
 
-
-// Tunables ...
-// The knob* variables are effectively final.  Once set they should
-// never be modified hence.  Consider using __read_mostly with GCC.
+static markOop ReadStableMark (oop obj) {
+  markOop mark = obj->mark() ;
+  if (!mark->is_being_inflated()) {
+    return mark ;       // normal fast-path return
+  }
 
-static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
-static int Knob_HandOff            = 0 ;
-static int Knob_Verbose            = 0 ;
-static int Knob_ReportSettings     = 0 ;
+  int its = 0 ;
+  for (;;) {
+    markOop mark = obj->mark() ;
+    if (!mark->is_being_inflated()) {
+      return mark ;    // normal fast-path return
+    }
+
+    // The object is being inflated by some other thread.
+    // The caller of ReadStableMark() must wait for inflation to complete.
+    // Avoid live-lock
+    // TODO: consider calling SafepointSynchronize::do_call_back() while
+    // spinning to see if there's a safepoint pending.  If so, immediately
+    // yielding or blocking would be appropriate.  Avoid spinning while
+    // there is a safepoint pending.
+    // TODO: add inflation contention performance counters.
+    // TODO: restrict the aggregate number of spinners.
 
-static int Knob_SpinLimit          = 5000 ;    // derived by an external tool -
-static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
-static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
-static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
-static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
-static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
-static int Knob_SpinEarly          = 1 ;
-static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
-static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
-static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
-static int Knob_Bonus              = 100 ;     // spin success bonus
-static int Knob_BonusB             = 100 ;     // spin success bonus
-static int Knob_Penalty            = 200 ;     // spin failure penalty
-static int Knob_Poverty            = 1000 ;
-static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
-static int Knob_FixedSpin          = 0 ;
-static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
-static int Knob_UsePause           = 1 ;
-static int Knob_ExitPolicy         = 0 ;
-static int Knob_PreSpin            = 10 ;      // 20-100 likely better
-static int Knob_ResetEvent         = 0 ;
-static int BackOffMask             = 0 ;
-
-static int Knob_FastHSSEC          = 0 ;
-static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
-static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
-static volatile int InitDone       = 0 ;
-
+    ++its ;
+    if (its > 10000 || !os::is_MP()) {
+       if (its & 1) {
+         os::NakedYield() ;
+         TEVENT (Inflate: INFLATING - yield) ;
+       } else {
+         // Note that the following code attenuates the livelock problem but is not
+         // a complete remedy.  A more complete solution would require that the inflating
+         // thread hold the associated inflation lock.  The following code simply restricts
+         // the number of spinners to at most one.  We'll have N-2 threads blocked
+         // on the inflationlock, 1 thread holding the inflation lock and using
+         // a yield/park strategy, and 1 thread in the midst of inflation.
+         // A more refined approach would be to change the encoding of INFLATING
+         // to allow encapsulation of a native thread pointer.  Threads waiting for
+         // inflation to complete would use CAS to push themselves onto a singly linked
+         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
+         // and calling park().  When inflation was complete the thread that accomplished inflation
+         // would detach the list and set the markword to inflated with a single CAS and
+         // then for each thread on the list, set the flag and unpark() the thread.
+         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
+         // wakes at most one thread whereas we need to wake the entire list.
+         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
+         int YieldThenBlock = 0 ;
+         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
+         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
+         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
+         while (obj->mark() == markOopDesc::INFLATING()) {
+           // Beware: NakedYield() is advisory and has almost no effect on some platforms
+           // so we periodically call Self->_ParkEvent->park(1).
+           // We use a mixed spin/yield/block mechanism.
+           if ((YieldThenBlock++) >= 16) {
+              Thread::current()->_ParkEvent->park(1) ;
+           } else {
+              os::NakedYield() ;
+           }
+         }
+         Thread::muxRelease (InflationLocks + ix ) ;
+         TEVENT (Inflate: INFLATING - yield/park) ;
+       }
+    } else {
+       SpinPause() ;       // SMP-polite spinning
+    }
+  }
+}
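The lock-striping index used above is just address bits hashed into a power-of-two table. A
standalone, compilable illustration of the same formula (values made up for the example;
kLocks mirrors NINFLATIONLOCKS):

#include <cstdint>
#include <cstdio>

int main() {
  const int kLocks = 256;                      // power of two, like NINFLATIONLOCKS
  uintptr_t addr = 0x12345680u;                // pretend object address
  int ix = (int)((addr >> 5) & (kLocks - 1));  // drop alignment bits, then mask
  std::printf("stripe index = %d (always in [0, %d))\n", ix, kLocks);
  return 0;
}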
 
 // hashCode() generation :
 //
@@ -290,416 +545,272 @@
   TEVENT (hashCode: GENERATE) ;
   return value;
 }
+//
+intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
+  if (UseBiasedLocking) {
+    // NOTE: many places throughout the JVM do not expect a safepoint
+    // to be taken here, in particular most operations on perm gen
+    // objects. However, we only ever bias Java instances and all of
+    // the call sites of identity_hash that might revoke biases have
+    // been checked to make sure they can handle a safepoint. The
+    // added check of the bias pattern is to avoid useless calls to
+    // thread-local storage.
+    if (obj->mark()->has_bias_pattern()) {
+      // Box and unbox the raw reference just in case we cause a STW safepoint.
+      Handle hobj (Self, obj) ;
+      // Relaxing assertion for bug 6320749.
+      assert (Universe::verify_in_progress() ||
+              !SafepointSynchronize::is_at_safepoint(),
+             "biases should not be seen by VM thread here");
+      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
+      obj = hobj() ;
+      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    }
+  }
 
-void BasicLock::print_on(outputStream* st) const {
-  st->print("monitor");
+  // hashCode() is a heap mutator ...
+  // Relaxing assertion for bug 6320749.
+  assert (Universe::verify_in_progress() ||
+          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
+  assert (Universe::verify_in_progress() ||
+          Self->is_Java_thread() , "invariant") ;
+  assert (Universe::verify_in_progress() ||
+         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
+
+  ObjectMonitor* monitor = NULL;
+  markOop temp, test;
+  intptr_t hash;
+  markOop mark = ReadStableMark (obj);
+
+  // object should remain ineligible for biased locking
+  assert (!mark->has_bias_pattern(), "invariant") ;
+
+  if (mark->is_neutral()) {
+    hash = mark->hash();              // this is a normal header
+    if (hash) {                       // if it has hash, just return it
+      return hash;
+    }
+    hash = get_next_hash(Self, obj);  // allocate a new hash code
+    temp = mark->copy_set_hash(hash); // merge the hash code into header
+    // use (machine word version) atomic operation to install the hash
+    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
+    if (test == mark) {
+      return hash;
+    }
+    // If the atomic operation failed, we must inflate the header
+    // into a heavyweight monitor. We could add more code here
+    // for the fast path, but it is not worth the complexity.
+  } else if (mark->has_monitor()) {
+    monitor = mark->monitor();
+    temp = monitor->header();
+    assert (temp->is_neutral(), "invariant") ;
+    hash = temp->hash();
+    if (hash) {
+      return hash;
+    }
+    // Skip to the following code to reduce code size
+  } else if (Self->is_lock_owned((address)mark->locker())) {
+    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
+    assert (temp->is_neutral(), "invariant") ;
+    hash = temp->hash();              // by current thread, check if the displaced
+    if (hash) {                       // header contains hash code
+      return hash;
+    }
+    // WARNING:
+    //   The displaced header is strictly immutable.
+    // It can NOT be changed in ANY case. So we have
+    // to inflate the header into a heavyweight monitor
+    // even if the current thread owns the lock. The reason
+    // is that the BasicLock (stack slot) will be asynchronously
+    // read by other threads during the inflate() function.
+    // Any change to the stack may not propagate to other
+    // threads correctly.
+  }
+
+  // Inflate the monitor to set hash code
+  monitor = ObjectSynchronizer::inflate(Self, obj);
+  // Load displaced header and check it has hash code
+  mark = monitor->header();
+  assert (mark->is_neutral(), "invariant") ;
+  hash = mark->hash();
+  if (hash == 0) {
+    hash = get_next_hash(Self, obj);
+    temp = mark->copy_set_hash(hash); // merge hash code into header
+    assert (temp->is_neutral(), "invariant") ;
+    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
+    if (test != mark) {
+      // The only update to the header in the monitor (outside GC)
+      // is to install the hash code. If someone adds a new use of the
+      // displaced header, please update this code.
+      hash = test->hash();
+      assert (test->is_neutral(), "invariant") ;
+      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
+    }
+  }
+  // We finally get the hash
+  return hash;
 }
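FastHashCode() above leans on a single idiom: generate a candidate value, try to publish it
with one CAS, and if another thread won the race, adopt the winner's value. A standalone sketch
of that idiom, with std::atomic standing in for the mark-word CAS (illustrative only, not
HotSpot code):

#include <atomic>
#include <cstdio>

static std::atomic<int> cached_hash{0};          // 0 means "no hash installed yet"

int get_or_install_hash(int candidate) {
  int expected = 0;
  if (cached_hash.compare_exchange_strong(expected, candidate)) {
    return candidate;                            // we published our value
  }
  return expected;                               // someone else won; use theirs
}

int main() {
  std::printf("%d\n", get_or_install_hash(12345));
  return 0;
}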
 
-void BasicLock::move_to(oop obj, BasicLock* dest) {
-  // Check to see if we need to inflate the lock. This is only needed
-  // if an object is locked using "this" lightweight monitor. In that
-  // case, the displaced_header() is unlocked, because the
-  // displaced_header() contains the header for the originally unlocked
-  // object. However the object could have already been inflated. But it
-  // does not matter, the inflation will just a no-op. For other cases,
-  // the displaced header will be either 0x0 or 0x3, which are location
-  // independent, therefore the BasicLock is free to move.
-  //
-  // During OSR we may need to relocate a BasicLock (which contains a
-  // displaced word) from a location in an interpreter frame to a
-  // new location in a compiled frame.  "this" refers to the source
-  // basiclock in the interpreter frame.  "dest" refers to the destination
-  // basiclock in the new compiled frame.  We *always* inflate in move_to().
-  // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
-  // cause performance problems in code that makes heavy use of a small # of
-  // uncontended locks.   (We'd inflate during OSR, and then sync performance
-  // would subsequently plummet because the thread would be forced thru the slow-path).
-  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
-  // operations in Fast_Lock and Fast_Unlock in i486.ad.
-  //
-  // Note that there is a way to safely swing the object's markword from
-  // one stack location to another.  This avoids inflation.  Obviously,
-  // we need to ensure that both locations refer to the current thread's stack.
-  // There are some subtle concurrency issues, however, and since the benefit is
-  // is small (given the support for inflated fast-path locking in the fast_lock, etc)
-  // we'll leave that optimization for another time.
+// Deprecated -- use FastHashCode() instead.
 
-  if (displaced_header()->is_neutral()) {
-    ObjectSynchronizer::inflate_helper(obj);
-    // WARNING: We can not put check here, because the inflation
-    // will not update the displaced header. Once BasicLock is inflated,
-    // no one should ever look at its content.
-  } else {
-    // Typically the displaced header will be 0 (recursive stack lock) or
-    // unused_mark.  Naively we'd like to assert that the displaced mark
-    // value is either 0, neutral, or 3.  But with the advent of the
-    // store-before-CAS avoidance in fast_lock/compiler_lock_object
-    // we can find any flavor mark in the displaced mark.
-  }
-// [RGV] The next line appears to do nothing!
-  intptr_t dh = (intptr_t) displaced_header();
-  dest->set_displaced_header(displaced_header());
+intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
+  return FastHashCode (Thread::current(), obj()) ;
 }
 
-// -----------------------------------------------------------------------------
 
-// standard constructor, allows locking failures
-ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
-  _dolock = doLock;
-  _thread = thread;
-  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
-  _obj = obj;
+bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
+                                                   Handle h_obj) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
+    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  assert(thread == JavaThread::current(), "Can only be called on current thread");
+  oop obj = h_obj();
+
+  markOop mark = ReadStableMark (obj) ;
 
-  if (_dolock) {
-    TEVENT (ObjectLocker) ;
-
-    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
+  // Uncontended case, header points to stack
+  if (mark->has_locker()) {
+    return thread->is_lock_owned((address)mark->locker());
   }
-}
-
-ObjectLocker::~ObjectLocker() {
-  if (_dolock) {
-    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
+  // Contended case, header points to ObjectMonitor (tagged pointer)
+  if (mark->has_monitor()) {
+    ObjectMonitor* monitor = mark->monitor();
+    return monitor->is_entered(thread) != 0 ;
   }
+  // Unlocked case, header in place
+  assert(mark->is_neutral(), "sanity check");
+  return false;
 }
 
-// -----------------------------------------------------------------------------
+// Be aware that this method can revoke the bias of the lock object.
+// It queries the ownership of the lock specified by 'h_obj'.
+// If the current thread owns the lock, it returns owner_self. If no
+// thread owns the lock, it returns owner_none. Otherwise, it returns
+// owner_other.
+ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
+(JavaThread *self, Handle h_obj) {
+  // The caller must beware this method can revoke bias, and
+  // revocation can result in a safepoint.
+  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
+  assert (self->thread_state() != _thread_blocked , "invariant") ;
 
+  // Possible mark states: neutral, biased, stack-locked, inflated
+
+  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
+    // CASE: biased
+    BiasedLocking::revoke_and_rebias(h_obj, false, self);
+    assert(!h_obj->mark()->has_bias_pattern(),
+           "biases should be revoked by now");
+  }
 
-PerfCounter * ObjectSynchronizer::_sync_Inflations                  = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_Deflations                  = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts       = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_FutileWakeups               = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_Parks                       = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications          = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_Notifications               = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_PrivateA                    = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_PrivateB                    = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SlowExit                    = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SlowEnter                   = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SlowNotify                  = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll               = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_FailedSpins                 = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins             = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_MonInCirculation            = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_MonScavenged                = NULL ;
-PerfLongVariable * ObjectSynchronizer::_sync_MonExtant              = NULL ;
+  assert(self == JavaThread::current(), "Can only be called on current thread");
+  oop obj = h_obj();
+  markOop mark = ReadStableMark (obj) ;
+
+  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
+  if (mark->has_locker()) {
+    return self->is_lock_owned((address)mark->locker()) ?
+      owner_self : owner_other;
+  }
 
-// One-shot global initialization for the sync subsystem.
-// We could also defer initialization and initialize on-demand
-// the first time we call inflate().  Initialization would
-// be protected - like so many things - by the MonitorCache_lock.
+  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
+  // The Object:ObjectMonitor relationship is stable as long as we're
+  // not at a safepoint.
+  if (mark->has_monitor()) {
+    void * owner = mark->monitor()->_owner ;
+    if (owner == NULL) return owner_none ;
+    return (owner == self ||
+            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
+  }
+
+  // CASE: neutral
+  assert(mark->is_neutral(), "sanity check");
+  return owner_none ;           // it's unlocked
+}
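A small hypothetical caller of the ownership query above, assuming the LockOwnership
enumerators are class-scoped as they are used in this file (illustrative only):

// Hypothetical diagnostic helper, for illustration only.
static const char* describe_ownership_sketch(JavaThread* self, Handle h_obj) {
  switch (ObjectSynchronizer::query_lock_ownership(self, h_obj)) {
    case ObjectSynchronizer::owner_self:  return "locked by the calling thread";
    case ObjectSynchronizer::owner_other: return "locked by another thread";
    default:                              return "unlocked";
  }
}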
 
-void ObjectSynchronizer::Initialize () {
-  static int InitializationCompleted = 0 ;
-  assert (InitializationCompleted == 0, "invariant") ;
-  InitializationCompleted = 1 ;
-  if (UsePerfData) {
-      EXCEPTION_MARK ;
-      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
-      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
-      NEWPERFCOUNTER(_sync_Inflations) ;
-      NEWPERFCOUNTER(_sync_Deflations) ;
-      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
-      NEWPERFCOUNTER(_sync_FutileWakeups) ;
-      NEWPERFCOUNTER(_sync_Parks) ;
-      NEWPERFCOUNTER(_sync_EmptyNotifications) ;
-      NEWPERFCOUNTER(_sync_Notifications) ;
-      NEWPERFCOUNTER(_sync_SlowEnter) ;
-      NEWPERFCOUNTER(_sync_SlowExit) ;
-      NEWPERFCOUNTER(_sync_SlowNotify) ;
-      NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
-      NEWPERFCOUNTER(_sync_FailedSpins) ;
-      NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
-      NEWPERFCOUNTER(_sync_PrivateA) ;
-      NEWPERFCOUNTER(_sync_PrivateB) ;
-      NEWPERFCOUNTER(_sync_MonInCirculation) ;
-      NEWPERFCOUNTER(_sync_MonScavenged) ;
-      NEWPERFVARIABLE(_sync_MonExtant) ;
-      #undef NEWPERFCOUNTER
+// FIXME: jvmti should call this
+JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
+  if (UseBiasedLocking) {
+    if (SafepointSynchronize::is_at_safepoint()) {
+      BiasedLocking::revoke_at_safepoint(h_obj);
+    } else {
+      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
+    }
+    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  oop obj = h_obj();
+  address owner = NULL;
+
+  markOop mark = ReadStableMark (obj) ;
+
+  // Uncontended case, header points to stack
+  if (mark->has_locker()) {
+    owner = (address) mark->locker();
+  }
+
+  // Contended case, header points to ObjectMonitor (tagged pointer)
+  if (mark->has_monitor()) {
+    ObjectMonitor* monitor = mark->monitor();
+    assert(monitor != NULL, "monitor should be non-null");
+    owner = (address) monitor->owner();
+  }
+
+  if (owner != NULL) {
+    return Threads::owning_thread_from_monitor_owner(owner, doLock);
+  }
+
+  // Unlocked case, header in place
+  // Cannot have assertion since this object may have been
+  // locked by another thread when reaching here.
+  // assert(mark->is_neutral(), "sanity check");
+
+  return NULL;
+}
+// Visitors ...
+
+void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
+  ObjectMonitor* block = gBlockList;
+  ObjectMonitor* mid;
+  while (block) {
+    assert(block->object() == CHAINMARKER, "must be a block header");
+    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
+      mid = block + i;
+      oop object = (oop) mid->object();
+      if (object != NULL) {
+        closure->do_monitor(mid);
+      }
+    }
+    block = (ObjectMonitor*) block->FreeNext;
   }
 }
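A hypothetical MonitorClosure for the visitor above, assuming the usual
do_monitor(ObjectMonitor*) interface from HotSpot's monitor headers (illustrative only, not
part of the patch):

// Hypothetical closure: counts in-use monitors that currently have an owner.
class CountOwnedMonitorsSketch : public MonitorClosure {
 public:
  int _count;
  CountOwnedMonitorsSketch() : _count(0) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() != NULL) _count++;
  }
};
// usage:  CountOwnedMonitorsSketch c;  ObjectSynchronizer::monitors_iterate(&c);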
 
-// Compile-time asserts
-// When possible, it's better to catch errors deterministically at
-// compile-time than at runtime.  The down-side to using compile-time
-// asserts is that error message -- often something about negative array
-// indices -- is opaque.
-
-#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
-
-void ObjectMonitor::ctAsserts() {
-  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
-}
-
-static int Adjust (volatile int * adr, int dx) {
-  int v ;
-  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
-  return v ;
-}
-
-// Ad-hoc mutual exclusion primitives: SpinLock and Mux
-//
-// We employ SpinLocks _only for low-contention, fixed-length
-// short-duration critical sections where we're concerned
-// about native mutex_t or HotSpot Mutex:: latency.
-// The mux construct provides a spin-then-block mutual exclusion
-// mechanism.
-//
-// Testing has shown that contention on the ListLock guarding gFreeList
-// is common.  If we implement ListLock as a simple SpinLock it's common
-// for the JVM to devolve to yielding with little progress.  This is true
-// despite the fact that the critical sections protected by ListLock are
-// extremely short.
-//
-// TODO-FIXME: ListLock should be of type SpinLock.
-// We should make this a 1st-class type, integrated into the lock
-// hierarchy as leaf-locks.  Critically, the SpinLock structure
-// should have sufficient padding to avoid false-sharing and excessive
-// cache-coherency traffic.
-
-
-typedef volatile int SpinLockT ;
-
-void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
-  if (Atomic::cmpxchg (1, adr, 0) == 0) {
-     return ;   // normal fast-path return
-  }
-
-  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
-  TEVENT (SpinAcquire - ctx) ;
-  int ctr = 0 ;
-  int Yields = 0 ;
-  for (;;) {
-     while (*adr != 0) {
-        ++ctr ;
-        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
-           if (Yields > 5) {
-             // Consider using a simple NakedSleep() instead.
-             // Then SpinAcquire could be called by non-JVM threads
-             Thread::current()->_ParkEvent->park(1) ;
-           } else {
-             os::NakedYield() ;
-             ++Yields ;
-           }
-        } else {
-           SpinPause() ;
-        }
-     }
-     if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
-  }
-}
-
-void Thread::SpinRelease (volatile int * adr) {
-  assert (*adr != 0, "invariant") ;
-  OrderAccess::fence() ;      // guarantee at least release consistency.
-  // Roach-motel semantics.
-  // It's safe if subsequent LDs and STs float "up" into the critical section,
-  // but prior LDs and STs within the critical section can't be allowed
-  // to reorder or float past the ST that releases the lock.
-  *adr = 0 ;
+// Get the next block in the block list.
+static inline ObjectMonitor* next(ObjectMonitor* block) {
+  assert(block->object() == CHAINMARKER, "must be a block header");
+  block = block->FreeNext ;
+  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
+  return block;
 }
 
-// muxAcquire and muxRelease:
-//
-// *  muxAcquire and muxRelease support a single-word lock-word construct.
-//    The LSB of the word is set IFF the lock is held.
-//    The remainder of the word points to the head of a singly-linked list
-//    of threads blocked on the lock.
-//
-// *  The current implementation of muxAcquire-muxRelease uses its own
-//    dedicated Thread._MuxEvent instance.  If we're interested in
-//    minimizing the peak number of extant ParkEvent instances then
-//    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
-//    as certain invariants were satisfied.  Specifically, care would need
-//    to be taken with regards to consuming unpark() "permits".
-//    A safe rule of thumb is that a thread would never call muxAcquire()
-//    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
-//    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
-//    consume an unpark() permit intended for monitorenter, for instance.
-//    One way around this would be to widen the restricted-range semaphore
-//    implemented in park().  Another alternative would be to provide
-//    multiple instances of the PlatformEvent() for each thread.  One
-//    instance would be dedicated to muxAcquire-muxRelease, for instance.
-//
-// *  Usage:
-//    -- Only as leaf locks
-//    -- for short-term locking only as muxAcquire does not perform
-//       thread state transitions.
-//
-// Alternatives:
-// *  We could implement muxAcquire and muxRelease with MCS or CLH locks
-//    but with parking or spin-then-park instead of pure spinning.
-// *  Use Taura-Oyama-Yonenzawa locks.
-// *  It's possible to construct a 1-0 lock if we encode the lockword as
-//    (List,LockByte).  Acquire will CAS the full lockword while Release
-//    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
-//    acquiring threads use timers (ParkTimed) to detect and recover from
-//    the stranding window.  Thread/Node structures must be aligned on 256-byte
-//    boundaries by using placement-new.
-// *  Augment MCS with advisory back-link fields maintained with CAS().
-//    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
-//    The validity of the backlinks must be ratified before we trust the value.
-//    If the backlinks are invalid the exiting thread must back-track through the
-//    the forward links, which are always trustworthy.
-// *  Add a successor indication.  The LockWord is currently encoded as
-//    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
-//    to provide the usual futile-wakeup optimization.
-//    See RTStt for details.
-// *  Consider schedctl.sc_nopreempt to cover the critical section.
-//
 
-
-typedef volatile intptr_t MutexT ;      // Mux Lock-word
-enum MuxBits { LOCKBIT = 1 } ;
-
-void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
-  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
-  if (w == 0) return ;
-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-     return ;
-  }
-
-  TEVENT (muxAcquire - Contention) ;
-  ParkEvent * const Self = Thread::current()->_MuxEvent ;
-  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
-  for (;;) {
-     int its = (os::is_MP() ? 100 : 0) + 1 ;
-
-     // Optional spin phase: spin-then-park strategy
-     while (--its >= 0) {
-       w = *Lock ;
-       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-          return ;
-       }
-     }
-
-     Self->reset() ;
-     Self->OnList = intptr_t(Lock) ;
-     // The following fence() isn't _strictly necessary as the subsequent
-     // CAS() both serializes execution and ratifies the fetched *Lock value.
-     OrderAccess::fence();
-     for (;;) {
-        w = *Lock ;
-        if ((w & LOCKBIT) == 0) {
-            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-                Self->OnList = 0 ;   // hygiene - allows stronger asserts
-                return ;
-            }
-            continue ;      // Interference -- *Lock changed -- Just retry
-        }
-        assert (w & LOCKBIT, "invariant") ;
-        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
-        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
-     }
-
-     while (Self->OnList != 0) {
-        Self->park() ;
-     }
-  }
-}
-
-void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
-  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
-  if (w == 0) return ;
-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-    return ;
-  }
-
-  TEVENT (muxAcquire - Contention) ;
-  ParkEvent * ReleaseAfter = NULL ;
-  if (ev == NULL) {
-    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
-  }
-  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
-  for (;;) {
-    guarantee (ev->OnList == 0, "invariant") ;
-    int its = (os::is_MP() ? 100 : 0) + 1 ;
-
-    // Optional spin phase: spin-then-park strategy
-    while (--its >= 0) {
-      w = *Lock ;
-      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-        if (ReleaseAfter != NULL) {
-          ParkEvent::Release (ReleaseAfter) ;
-        }
-        return ;
+void ObjectSynchronizer::oops_do(OopClosure* f) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
+    assert(block->object() == CHAINMARKER, "must be a block header");
+    for (int i = 1; i < _BLOCKSIZE; i++) {
+      ObjectMonitor* mid = &block[i];
+      if (mid->object() != NULL) {
+        f->do_oop((oop*)mid->object_addr());
       }
     }
-
-    ev->reset() ;
-    ev->OnList = intptr_t(Lock) ;
-    // The following fence() isn't _strictly necessary as the subsequent
-    // CAS() both serializes execution and ratifies the fetched *Lock value.
-    OrderAccess::fence();
-    for (;;) {
-      w = *Lock ;
-      if ((w & LOCKBIT) == 0) {
-        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-          ev->OnList = 0 ;
-          // We call ::Release while holding the outer lock, thus
-          // artificially lengthening the critical section.
-          // Consider deferring the ::Release() until the subsequent unlock(),
-          // after we've dropped the outer lock.
-          if (ReleaseAfter != NULL) {
-            ParkEvent::Release (ReleaseAfter) ;
-          }
-          return ;
-        }
-        continue ;      // Interference -- *Lock changed -- Just retry
-      }
-      assert (w & LOCKBIT, "invariant") ;
-      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
-      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
-    }
-
-    while (ev->OnList != 0) {
-      ev->park() ;
-    }
   }
 }
 
-// Release() must extract a successor from the list and then wake that thread.
-// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
-// similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based
-// Release() would :
-// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
-// (B) Extract a successor from the private list "in-hand"
-// (C) attempt to CAS() the residual back into *Lock over null.
-//     If there were any newly arrived threads and the CAS() would fail.
-//     In that case Release() would detach the RATs, re-merge the list in-hand
-//     with the RATs and repeat as needed.  Alternately, Release() might
-//     detach and extract a successor, but then pass the residual list to the wakee.
-//     The wakee would be responsible for reattaching and remerging before it
-//     competed for the lock.
-//
-// Both "pop" and DMR are immune from ABA corruption -- there can be
-// multiple concurrent pushers, but only one popper or detacher.
-// This implementation pops from the head of the list.  This is unfair,
-// but tends to provide excellent throughput as hot threads remain hot.
-// (We wake recently run threads first).
 
-void Thread::muxRelease (volatile intptr_t * Lock)  {
-  for (;;) {
-    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
-    assert (w & LOCKBIT, "invariant") ;
-    if (w == LOCKBIT) return ;
-    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
-    assert (List != NULL, "invariant") ;
-    assert (List->OnList == intptr_t(Lock), "invariant") ;
-    ParkEvent * nxt = List->ListNext ;
-
-    // The following CAS() releases the lock and pops the head element.
-    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
-      continue ;
-    }
-    List->OnList = 0 ;
-    OrderAccess::fence() ;
-    List->unpark () ;
-    return ;
-  }
-}
-
+// -----------------------------------------------------------------------------
 // ObjectMonitor Lifecycle
 // -----------------------
 // Inflation unlinks monitors from the global gFreeList and
@@ -718,41 +829,7 @@
 // --   assigned to an object.  The object is inflated and the mark refers
 //      to the objectmonitor.
 //
-// TODO-FIXME:
-//
-// *  We currently protect the gFreeList with a simple lock.
-//    An alternate lock-free scheme would be to pop elements from the gFreeList
-//    with CAS.  This would be safe from ABA corruption as long we only
-//    recycled previously appearing elements onto the list in deflate_idle_monitors()
-//    at STW-time.  Completely new elements could always be pushed onto the gFreeList
-//    with CAS.  Elements that appeared previously on the list could only
-//    be installed at STW-time.
-//
-// *  For efficiency and to help reduce the store-before-CAS penalty
-//    the objectmonitors on gFreeList or local free lists should be ready to install
-//    with the exception of _header and _object.  _object can be set after inflation.
-//    In particular, keep all objectMonitors on a thread's private list in ready-to-install
-//    state with m.Owner set properly.
-//
-// *  We could all diffuse contention by using multiple global (FreeList, Lock)
-//    pairs -- threads could use trylock() and a cyclic-scan strategy to search for
-//    an unlocked free list.
-//
-// *  Add lifecycle tags and assert()s.
-//
-// *  Be more consistent about when we clear an objectmonitor's fields:
-//    A.  After extracting the objectmonitor from a free list.
-//    B.  After adding an objectmonitor to a free list.
-//
 
-ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
-ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
-ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
-int ObjectSynchronizer::gOmInUseCount = 0;
-static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
-static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
-static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
-#define CHAINMARKER ((oop)-1)
 
 // Constraining monitor pool growth via MonitorBound ...
 //
@@ -768,41 +845,8 @@
 // we'll incur more safepoints, which are harmful to performance.
 // See also: GuaranteedSafepointInterval
 //
-// As noted elsewhere, the correct long-term solution is to deflate at
-// monitorexit-time, in which case the number of inflated objects is bounded
-// by the number of threads.  That policy obviates the need for scavenging at
-// STW safepoint time.   As an aside, scavenging can be time-consuming when the
-// # of extant monitors is large.   Unfortunately there's a day-1 assumption baked
-// into much HotSpot code that the object::monitor relationship, once established
-// or observed, will remain stable except over potential safepoints.
-//
-// We can use either a blocking synchronous VM operation or an async VM operation.
-// -- If we use a blocking VM operation :
-//    Calls to ScavengeCheck() should be inserted only into 'safe' locations in paths
-//    that lead to ::inflate() or ::omAlloc().
-//    Even though the safepoint will not directly induce GC, a GC might
-//    piggyback on the safepoint operation, so the caller should hold no naked oops.
-//    Furthermore, monitor::object relationships are NOT necessarily stable over this call
-//    unless the caller has made provisions to "pin" the object to the monitor, say
-//    by incrementing the monitor's _count field.
-// -- If we use a non-blocking asynchronous VM operation :
-//    the constraints above don't apply.  The safepoint will fire in the future
-//    at a more convenient time.  On the other hand the latency between posting and
-//    running the safepoint introduces or admits "slop" or laxity during which the
-//    monitor population can climb further above the threshold.  The monitor population,
-//    however, tends to converge asymptotically over time to a count that's slightly
-//    above the target value specified by MonitorBound.   That is, we avoid unbounded
-//    growth, albeit with some imprecision.
-//
 // The current implementation uses asynchronous VM operations.
 //
-// Ideally we'd check if (MonitorPopulation > MonitorBound) in omAlloc()
-// immediately before trying to grow the global list via allocation.
-// If the predicate was true then we'd induce a synchronous safepoint, wait
-// for the safepoint to complete, and then again to allocate from the global
-// free list.  This approach is much simpler and precise, admitting no "slop".
-// Unfortunately we can't safely safepoint in the midst of omAlloc(), so
-// instead we use asynchronous safepoints.
 
 static void InduceScavenge (Thread * Self, const char * Whence) {
   // Induce STW safepoint to trim monitors
@@ -812,7 +856,7 @@
   // TODO: assert thread state is reasonable
 
   if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
-    if (Knob_Verbose) {
+    if (ObjectMonitor::Knob_Verbose) {
       ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
       ::fflush(stdout) ;
     }
@@ -822,7 +866,7 @@
     // The VMThread will delete the op when completed.
     VMThread::execute (new VM_ForceAsyncSafepoint()) ;
 
-    if (Knob_Verbose) {
+    if (ObjectMonitor::Knob_Verbose) {
       ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
       ::fflush(stdout) ;
     }
@@ -844,7 +888,6 @@
    assert(freetally == Self->omFreeCount, "free count off");
 }
 */
-
 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
     // A large MAXPRIVATE value reduces both list lock contention
     // and list coherency traffic, but also tends to increase the
@@ -974,12 +1017,6 @@
 // attempt failed.  This doesn't allow unbounded #s of monitors to
 // accumulate on a thread's free list.
 //
-// In the future the usage of omRelease() might change and monitors
-// could migrate between free lists.  In that case to avoid excessive
-// accumulation we could  limit omCount to (omProvision*2), otherwise return
-// the objectMonitor to the global list.  We should drain (return) in reasonable chunks.
-// That is, *not* one-at-a-time.
-
 
 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
     guarantee (m->object() == NULL, "invariant") ;
@@ -1082,15 +1119,6 @@
     TEVENT (omFlush) ;
 }
 
-
-// Get the next block in the block list.
-static inline ObjectMonitor* next(ObjectMonitor* block) {
-  assert(block->object() == CHAINMARKER, "must be a block header");
-  block = block->FreeNext ;
-  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
-  return block;
-}
-
 // Fast path code shared by multiple functions
 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
   markOop mark = obj->mark();
@@ -1102,79 +1130,10 @@
   return ObjectSynchronizer::inflate(Thread::current(), obj);
 }
 
+
 // Note that we could encounter some performance loss through false-sharing as
 // multiple locks occupy the same $ line.  Padding might be appropriate.
 
-#define NINFLATIONLOCKS 256
-static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
-
-static markOop ReadStableMark (oop obj) {
-  markOop mark = obj->mark() ;
-  if (!mark->is_being_inflated()) {
-    return mark ;       // normal fast-path return
-  }
-
-  int its = 0 ;
-  for (;;) {
-    markOop mark = obj->mark() ;
-    if (!mark->is_being_inflated()) {
-      return mark ;    // normal fast-path return
-    }
-
-    // The object is being inflated by some other thread.
-    // The caller of ReadStableMark() must wait for inflation to complete.
-    // Avoid live-lock
-    // TODO: consider calling SafepointSynchronize::do_call_back() while
-    // spinning to see if there's a safepoint pending.  If so, immediately
-    // yielding or blocking would be appropriate.  Avoid spinning while
-    // there is a safepoint pending.
-    // TODO: add inflation contention performance counters.
-    // TODO: restrict the aggregate number of spinners.
-
-    ++its ;
-    if (its > 10000 || !os::is_MP()) {
-       if (its & 1) {
-         os::NakedYield() ;
-         TEVENT (Inflate: INFLATING - yield) ;
-       } else {
-         // Note that the following code attenuates the livelock problem but is not
-         // a complete remedy.  A more complete solution would require that the inflating
-         // thread hold the associated inflation lock.  The following code simply restricts
-         // the number of spinners to at most one.  We'll have N-2 threads blocked
-         // on the inflationlock, 1 thread holding the inflation lock and using
-         // a yield/park strategy, and 1 thread in the midst of inflation.
-         // A more refined approach would be to change the encoding of INFLATING
-         // to allow encapsulation of a native thread pointer.  Threads waiting for
-         // inflation to complete would use CAS to push themselves onto a singly linked
-         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
-         // and calling park().  When inflation was complete the thread that accomplished inflation
-         // would detach the list and set the markword to inflated with a single CAS and
-         // then for each thread on the list, set the flag and unpark() the thread.
-         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
-         // wakes at most one thread whereas we need to wake the entire list.
-         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
-         int YieldThenBlock = 0 ;
-         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
-         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
-         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
-         while (obj->mark() == markOopDesc::INFLATING()) {
-           // Beware: NakedYield() is advisory and has almost no effect on some platforms
-           // so we periodically call Self->_ParkEvent->park(1).
-           // We use a mixed spin/yield/block mechanism.
-           if ((YieldThenBlock++) >= 16) {
-              Thread::current()->_ParkEvent->park(1) ;
-           } else {
-              os::NakedYield() ;
-           }
-         }
-         Thread::muxRelease (InflationLocks + ix ) ;
-         TEVENT (Inflate: INFLATING - yield/park) ;
-       }
-    } else {
-       SpinPause() ;       // SMP-polite spinning
-    }
-  }
-}
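The ReadStableMark() code removed above throttles threads waiting for inflation by hashing the object address into a small, fixed array of striped "inflation locks". A standalone sketch of that striping idea follows (illustrative only: std::mutex stands in for Thread::muxAcquire/muxRelease, and the shift and mask simply mirror the expression in the deleted code):

#include <cstdint>
#include <mutex>

static const int kNumStripes = 256;        // must stay a power of two
static std::mutex gStripes[kNumStripes];

// Map an object address to one of the striped locks, echoing
// (intptr_t(obj) >> 5) & (NINFLATIONLOCKS - 1) above.
static std::mutex& stripe_for(const void* obj) {
  std::uintptr_t bits = reinterpret_cast<std::uintptr_t>(obj);
  return gStripes[(bits >> 5) & (kNumStripes - 1)];
}

int main() {
  int dummy = 0;
  std::lock_guard<std::mutex> guard(stripe_for(&dummy));
  // A real waiter would loop here until the mark word is no longer INFLATING.
  return 0;
}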
 
 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
   // Inflate mutates the heap ...
@@ -1242,7 +1201,7 @@
           m->_Responsible  = NULL ;
           m->OwnerIsThread = 0 ;
           m->_recursions   = 0 ;
-          m->_SpinDuration = Knob_SpinLimit ;   // Consider: maintain by type/class
+          m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;   // Consider: maintain by type/class
 
           markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
           if (cmp != mark) {
@@ -1302,7 +1261,7 @@
 
           // Hopefully the performance counters are allocated on distinct cache lines
           // to avoid false sharing on MP systems ...
-          if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
+          if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
           TEVENT(Inflate: overwrite stacklock) ;
           if (TraceMonitorInflation) {
             if (object->is_instance()) {
@@ -1335,7 +1294,7 @@
       m->OwnerIsThread = 1 ;
       m->_recursions   = 0 ;
       m->_Responsible  = NULL ;
-      m->_SpinDuration = Knob_SpinLimit ;       // consider: keep metastats by type/class
+      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;       // consider: keep metastats by type/class
 
       if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
           m->set_object (NULL) ;
@@ -1352,7 +1311,7 @@
 
       // Hopefully the performance counters are allocated on distinct
       // cache lines to avoid false sharing on MP systems ...
-      if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
+      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
       TEVENT(Inflate: overwrite neutral) ;
       if (TraceMonitorInflation) {
         if (object->is_instance()) {
@@ -1366,547 +1325,9 @@
   }
 }
 
-
-// This is the fast monitor enter. The interpreter and compiler use
-// assembly copies of this code. Make sure to update that code
-// if the following function is changed. The implementation is
-// extremely sensitive to race conditions. Be careful.
-
-void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
- if (UseBiasedLocking) {
-    if (!SafepointSynchronize::is_at_safepoint()) {
-      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
-      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
-        return;
-      }
-    } else {
-      assert(!attempt_rebias, "can not rebias toward VM thread");
-      BiasedLocking::revoke_at_safepoint(obj);
-    }
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
- }
-
- slow_enter (obj, lock, THREAD) ;
-}
-
-void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
-  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
-  // If the displaced header is null, the previous enter was a recursive enter; no-op.
-  markOop dhw = lock->displaced_header();
-  markOop mark ;
-  if (dhw == NULL) {
-     // Recursive stack-lock.
-     // Diagnostics -- Could be: stack-locked, inflating, inflated.
-     mark = object->mark() ;
-     assert (!mark->is_neutral(), "invariant") ;
-     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
-        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
-     }
-     if (mark->has_monitor()) {
-        ObjectMonitor * m = mark->monitor() ;
-        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
-        assert(m->is_entered(THREAD), "invariant") ;
-     }
-     return ;
-  }
-
-  mark = object->mark() ;
-
-  // If the object is stack-locked by the current thread, try to
-  // swing the displaced header from the box back to the mark.
-  if (mark == (markOop) lock) {
-     assert (dhw->is_neutral(), "invariant") ;
-     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
-        TEVENT (fast_exit: release stacklock) ;
-        return;
-     }
-  }
-
-  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
-}
-
-// This routine is used to handle the interpreter/compiler slow case.
-// We don't need to use the fast path here, because it must have
-// failed in the interpreter/compiler code.
-void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
-  markOop mark = obj->mark();
-  assert(!mark->has_bias_pattern(), "should not see bias pattern here");
-
-  if (mark->is_neutral()) {
-    // Anticipate successful CAS -- the ST of the displaced mark must
-    // be visible <= the ST performed by the CAS.
-    lock->set_displaced_header(mark);
-    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
-      TEVENT (slow_enter: release stacklock) ;
-      return ;
-    }
-    // Fall through to inflate() ...
-  } else
-  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
-    assert(lock != mark->locker(), "must not re-lock the same lock");
-    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
-    lock->set_displaced_header(NULL);
-    return;
-  }
-
-#if 0
-  // The following optimization isn't particularly useful.
-  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
-    lock->set_displaced_header (NULL) ;
-    return ;
-  }
-#endif
-
-  // The object header will never be displaced to this lock,
-  // so it does not matter what the value is, except that it
-  // must be non-zero to avoid looking like a re-entrant lock,
-  // and must not look locked either.
-  lock->set_displaced_header(markOopDesc::unused_mark());
-  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
-}
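The slow_enter() removed above stack-locks by saving the object's neutral mark word into the on-stack BasicLock (the displaced header) and then CAS-ing the BasicLock's address into the mark word. The following is a deliberately simplified single-word model of that handshake; the types are invented stand-ins, not HotSpot's markOop/BasicLock:

#include <atomic>
#include <cassert>
#include <cstdint>

struct BasicLockModel { std::uintptr_t displaced; };         // stand-in for BasicLock
struct ObjModel       { std::atomic<std::uintptr_t> mark; }; // stand-in for the header word

// Returns true if the stack lock was installed; false means contention,
// where the real code falls through to monitor inflation.
static bool stack_lock(ObjModel* obj, BasicLockModel* box) {
  std::uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
  box->displaced = mark;                                   // save the neutral header
  return obj->mark.compare_exchange_strong(                // point the mark at the box
      mark, reinterpret_cast<std::uintptr_t>(box));
}

static bool stack_unlock(ObjModel* obj, BasicLockModel* box) {
  std::uintptr_t expected = reinterpret_cast<std::uintptr_t>(box);
  return obj->mark.compare_exchange_strong(expected, box->displaced);
}

int main() {
  ObjModel obj;
  obj.mark.store(0x1);                                     // pretend 0x1 is a neutral mark
  BasicLockModel box;
  assert(stack_lock(&obj, &box));
  assert(stack_unlock(&obj, &box));
  return 0;
}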
-
-// This routine is used to handle the interpreter/compiler slow case.
-// We don't need to use the fast path here, because it must have
-// failed in the interpreter/compiler code. Simply using the heavy-
-// weight monitor should be OK, unless someone finds otherwise.
-void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
-  fast_exit (object, lock, THREAD) ;
-}
-
-// NOTE: must use heavy weight monitor to handle jni monitor enter
-void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
-  // the current locking is from JNI instead of Java code
-  TEVENT (jni_enter) ;
-  if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-  THREAD->set_current_pending_monitor_is_from_java(false);
-  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
-  THREAD->set_current_pending_monitor_is_from_java(true);
-}
-
-// NOTE: must use heavy weight monitor to handle jni monitor enter
-bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
-  if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
-  return monitor->try_enter(THREAD);
-}
-
-
-// NOTE: must use heavy weight monitor to handle jni monitor exit
-void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
-  TEVENT (jni_exit) ;
-  if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-  }
-  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
-  // If this thread has locked the object, exit the monitor.  Note:  can't use
-  // monitor->check(CHECK); must exit even if an exception is pending.
-  if (monitor->check(THREAD)) {
-     monitor->exit(THREAD);
-  }
-}
-
-// complete_exit()/reenter() are used to wait on a nested lock
-// i.e. to give up an outer lock completely and then re-enter
-// Used when holding nested locks - lock acquisition order: lock1 then lock2
-//  1) complete_exit lock1 - saving recursion count
-//  2) wait on lock2
-//  3) when notified on lock2, unlock lock2
-//  4) reenter lock1 with original recursion count
-//  5) lock lock2
-// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
-intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
-  TEVENT (complete_exit) ;
-  if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
-
-  return monitor->complete_exit(THREAD);
-}
-
-// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
-void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
-  TEVENT (reenter) ;
-  if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
-
-  monitor->reenter(recursion, THREAD);
-}
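complete_exit()/reenter(), removed above, give up a possibly recursive lock entirely while remembering the recursion count so it can be restored afterwards (steps 1-4 in the comment before complete_exit()). Below is a toy model of that contract; the ToyMonitor class and its std::mutex core are assumptions for illustration, not how ObjectMonitor is implemented:

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

class ToyMonitor {
  std::mutex _mu;
  std::atomic<std::thread::id> _owner{ std::thread::id() };
  int _recursions = 0;
public:
  void enter() {
    if (_owner.load() == std::this_thread::get_id()) { ++_recursions; return; }
    _mu.lock();
    _owner.store(std::this_thread::get_id());
  }
  void exit() {
    if (_recursions > 0) { --_recursions; return; }
    _owner.store(std::thread::id());
    _mu.unlock();
  }
  // Release the lock completely and return the saved recursion count.
  int complete_exit() {
    int saved = _recursions;
    _recursions = 0;
    _owner.store(std::thread::id());
    _mu.unlock();
    return saved;
  }
  // Reacquire and restore the recursion count saved by complete_exit().
  void reenter(int saved) {
    _mu.lock();
    _owner.store(std::this_thread::get_id());
    _recursions = saved;
  }
};

int main() {
  ToyMonitor m;
  m.enter(); m.enter();            // recursion count is now 1
  int saved = m.complete_exit();   // lock fully released
  assert(saved == 1);
  m.reenter(saved);                // ownership and count restored
  m.exit(); m.exit();
  return 0;
}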
-
-// This exists only as a workaround of dtrace bug 6254741
-int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
-  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
-  return 0;
-}
-
-// NOTE: must use heavy weight monitor to handle wait()
-void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
-  if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-  if (millis < 0) {
-    TEVENT (wait - throw IAX) ;
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
-  }
-  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
-  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
-  monitor->wait(millis, true, THREAD);
-
-  /* This dummy call is in place to get around dtrace bug 6254741.  Once
-     that's fixed we can uncomment the following line and remove the call */
-  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
-  dtrace_waited_probe(monitor, obj, THREAD);
-}
-
-void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
-  if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-  if (millis < 0) {
-    TEVENT (wait - throw IAX) ;
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
-  }
-  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
-}
-
-void ObjectSynchronizer::notify(Handle obj, TRAPS) {
- if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-
-  markOop mark = obj->mark();
-  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
-    return;
-  }
-  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
-}
-
-// NOTE: see comment of notify()
-void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
-  if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-
-  markOop mark = obj->mark();
-  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
-    return;
-  }
-  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
-}
-
-intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
-  if (UseBiasedLocking) {
-    // NOTE: many places throughout the JVM do not expect a safepoint
-    // to be taken here, in particular most operations on perm gen
-    // objects. However, we only ever bias Java instances and all of
-    // the call sites of identity_hash that might revoke biases have
-    // been checked to make sure they can handle a safepoint. The
-    // added check of the bias pattern is to avoid useless calls to
-    // thread-local storage.
-    if (obj->mark()->has_bias_pattern()) {
-      // Box and unbox the raw reference just in case we cause a STW safepoint.
-      Handle hobj (Self, obj) ;
-      // Relaxing assertion for bug 6320749.
-      assert (Universe::verify_in_progress() ||
-              !SafepointSynchronize::is_at_safepoint(),
-             "biases should not be seen by VM thread here");
-      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
-      obj = hobj() ;
-      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-    }
-  }
+// Note that we could encounter some performance loss through false-sharing as
+// multiple locks occupy the same $ line.  Padding might be appropriate.
 
-  // hashCode() is a heap mutator ...
-  // Relaxing assertion for bug 6320749.
-  assert (Universe::verify_in_progress() ||
-          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
-  assert (Universe::verify_in_progress() ||
-          Self->is_Java_thread() , "invariant") ;
-  assert (Universe::verify_in_progress() ||
-         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
-
-  ObjectMonitor* monitor = NULL;
-  markOop temp, test;
-  intptr_t hash;
-  markOop mark = ReadStableMark (obj);
-
-  // object should remain ineligible for biased locking
-  assert (!mark->has_bias_pattern(), "invariant") ;
-
-  if (mark->is_neutral()) {
-    hash = mark->hash();              // this is a normal header
-    if (hash) {                       // if it has hash, just return it
-      return hash;
-    }
-    hash = get_next_hash(Self, obj);  // allocate a new hash code
-    temp = mark->copy_set_hash(hash); // merge the hash code into header
-    // use (machine word version) atomic operation to install the hash
-    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
-    if (test == mark) {
-      return hash;
-    }
-    // If the atomic operation failed, we must inflate the header
-    // into a heavyweight monitor. We could add more code here
-    // for the fast path, but it is not worth the complexity.
-  } else if (mark->has_monitor()) {
-    monitor = mark->monitor();
-    temp = monitor->header();
-    assert (temp->is_neutral(), "invariant") ;
-    hash = temp->hash();
-    if (hash) {
-      return hash;
-    }
-    // Skip to the following code to reduce code size
-  } else if (Self->is_lock_owned((address)mark->locker())) {
-    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
-    assert (temp->is_neutral(), "invariant") ;
-    hash = temp->hash();              // by current thread, check if the displaced
-    if (hash) {                       // header contains hash code
-      return hash;
-    }
-    // WARNING:
-    //   The displaced header is strictly immutable.
-    // It can NOT be changed in ANY case. So we have
-    // to inflate the header into a heavyweight monitor
-    // even if the current thread owns the lock. The reason
-    // is that the BasicLock (stack slot) will be asynchronously
-    // read by other threads during the inflate() function.
-    // Any change to the stack may not propagate to other threads
-    // correctly.
-  }
-
-  // Inflate the monitor to set hash code
-  monitor = ObjectSynchronizer::inflate(Self, obj);
-  // Load displaced header and check it has hash code
-  mark = monitor->header();
-  assert (mark->is_neutral(), "invariant") ;
-  hash = mark->hash();
-  if (hash == 0) {
-    hash = get_next_hash(Self, obj);
-    temp = mark->copy_set_hash(hash); // merge hash code into header
-    assert (temp->is_neutral(), "invariant") ;
-    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
-    if (test != mark) {
-      // The only update to the header in the monitor (outside GC)
-      // is installing the hash code. If someone adds a new use of the
-      // displaced header, please update this code.
-      hash = test->hash();
-      assert (test->is_neutral(), "invariant") ;
-      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
-    }
-  }
-  // We finally get the hash
-  return hash;
-}
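FastHashCode() above generates the identity hash lazily and publishes it with a CAS on the header; if the CAS loses a race, the winner's value is used from then on. A compact standalone sketch of that install-once pattern (the single global header word and the hash source are invented for the example):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Invented layout: the word holds the hash directly, 0 meaning "no hash yet".
static std::atomic<std::uint32_t> gHeader(0);
static std::atomic<std::uint32_t> gNextHash(1);

static std::uint32_t fast_hash_code() {
  std::uint32_t h = gHeader.load(std::memory_order_acquire);
  if (h != 0) return h;                            // already installed
  std::uint32_t mine = gNextHash.fetch_add(1);     // candidate hash
  std::uint32_t expected = 0;
  if (gHeader.compare_exchange_strong(expected, mine)) {
    return mine;                                   // we won the race
  }
  return expected;                                 // somebody else won; use theirs
}

int main() {
  std::printf("hash = %u\n", fast_hash_code());
  std::printf("hash = %u\n", fast_hash_code());    // same value both times
  return 0;
}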
-
-// Deprecated -- use FastHashCode() instead.
-
-intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
-  return FastHashCode (Thread::current(), obj()) ;
-}
-
-bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
-                                                   Handle h_obj) {
-  if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
-    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-
-  assert(thread == JavaThread::current(), "Can only be called on current thread");
-  oop obj = h_obj();
-
-  markOop mark = ReadStableMark (obj) ;
-
-  // Uncontended case, header points to stack
-  if (mark->has_locker()) {
-    return thread->is_lock_owned((address)mark->locker());
-  }
-  // Contended case, header points to ObjectMonitor (tagged pointer)
-  if (mark->has_monitor()) {
-    ObjectMonitor* monitor = mark->monitor();
-    return monitor->is_entered(thread) != 0 ;
-  }
-  // Unlocked case, header in place
-  assert(mark->is_neutral(), "sanity check");
-  return false;
-}
-
-// Be aware that this method can revoke the bias of the lock object.
-// This method queries the ownership of the lock handle specified by 'h_obj'.
-// If the current thread owns the lock, it returns owner_self. If no
-// thread owns the lock, it returns owner_none. Otherwise, it returns
-// owner_other.
-ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
-(JavaThread *self, Handle h_obj) {
-  // The caller must beware this method can revoke bias, and
-  // revocation can result in a safepoint.
-  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
-  assert (self->thread_state() != _thread_blocked , "invariant") ;
-
-  // Possible mark states: neutral, biased, stack-locked, inflated
-
-  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
-    // CASE: biased
-    BiasedLocking::revoke_and_rebias(h_obj, false, self);
-    assert(!h_obj->mark()->has_bias_pattern(),
-           "biases should be revoked by now");
-  }
-
-  assert(self == JavaThread::current(), "Can only be called on current thread");
-  oop obj = h_obj();
-  markOop mark = ReadStableMark (obj) ;
-
-  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
-  if (mark->has_locker()) {
-    return self->is_lock_owned((address)mark->locker()) ?
-      owner_self : owner_other;
-  }
-
-  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
-  // The Object:ObjectMonitor relationship is stable as long as we're
-  // not at a safepoint.
-  if (mark->has_monitor()) {
-    void * owner = mark->monitor()->_owner ;
-    if (owner == NULL) return owner_none ;
-    return (owner == self ||
-            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
-  }
-
-  // CASE: neutral
-  assert(mark->is_neutral(), "sanity check");
-  return owner_none ;           // it's unlocked
-}
-
-// FIXME: jvmti should call this
-JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
-  if (UseBiasedLocking) {
-    if (SafepointSynchronize::is_at_safepoint()) {
-      BiasedLocking::revoke_at_safepoint(h_obj);
-    } else {
-      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
-    }
-    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-  }
-
-  oop obj = h_obj();
-  address owner = NULL;
-
-  markOop mark = ReadStableMark (obj) ;
-
-  // Uncontended case, header points to stack
-  if (mark->has_locker()) {
-    owner = (address) mark->locker();
-  }
-
-  // Contended case, header points to ObjectMonitor (tagged pointer)
-  if (mark->has_monitor()) {
-    ObjectMonitor* monitor = mark->monitor();
-    assert(monitor != NULL, "monitor should be non-null");
-    owner = (address) monitor->owner();
-  }
-
-  if (owner != NULL) {
-    return Threads::owning_thread_from_monitor_owner(owner, doLock);
-  }
-
-  // Unlocked case, header in place
-  // Cannot have assertion since this object may have been
-  // locked by another thread when reaching here.
-  // assert(mark->is_neutral(), "sanity check");
-
-  return NULL;
-}
-
-// Iterate through monitor cache and attempt to release thread's monitors
-// Gives up on a particular monitor if an exception occurs, but continues
-// the overall iteration, swallowing the exception.
-class ReleaseJavaMonitorsClosure: public MonitorClosure {
-private:
-  TRAPS;
-
-public:
-  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
-  void do_monitor(ObjectMonitor* mid) {
-    if (mid->owner() == THREAD) {
-      (void)mid->complete_exit(CHECK);
-    }
-  }
-};
-
-// Release all inflated monitors owned by THREAD.  Lightweight monitors are
-// ignored.  This is meant to be called during JNI thread detach which assumes
-// all remaining monitors are heavyweight.  All exceptions are swallowed.
-// Scanning the extant monitor list can be time consuming.
-// A simple optimization is to add a per-thread flag that indicates a thread
-// called jni_monitorenter() during its lifetime.
-//
-// Instead of No_Safepoint_Verifier it might be cheaper to
-// use an idiom of the form:
-//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
-//   <code that must not run at safepoint>
-//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
-// Since the tests are extremely cheap we could leave them enabled
-// for normal product builds.
-
-void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
-  assert(THREAD == JavaThread::current(), "must be current Java thread");
-  No_Safepoint_Verifier nsv ;
-  ReleaseJavaMonitorsClosure rjmc(THREAD);
-  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
-  ObjectSynchronizer::monitors_iterate(&rjmc);
-  Thread::muxRelease(&ListLock);
-  THREAD->clear_pending_exception();
-}
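The comment above release_monitors_owned_by_thread() suggests a cheap alternative to No_Safepoint_Verifier: snapshot a counter that is odd while a safepoint is in progress and bumped on every transition, run the code, then check that it is unchanged and even. A standalone illustration of that idiom (the counter and guarantee() here are stand-ins, not the real SafepointSynchronize interface):

#include <atomic>
#include <cstdio>
#include <cstdlib>

// Stand-in for SafepointSynchronize::_safepoint_counter: incremented when a
// safepoint begins (value becomes odd) and again when it ends (even).
static std::atomic<unsigned> gSafepointCounter(0);

static void guarantee(bool cond, const char* msg) {
  if (!cond) { std::fprintf(stderr, "guarantee failed: %s\n", msg); std::abort(); }
}

static void must_not_run_at_safepoint() {
  unsigned tmp = gSafepointCounter.load();
  // ... code that must not run at a safepoint ...
  unsigned now = gSafepointCounter.load();
  // Unchanged and even means no safepoint started or completed in between.
  guarantee(((tmp ^ now) | (tmp & 1)) == 0, "ran inside a safepoint");
}

int main() {
  must_not_run_at_safepoint();
  return 0;
}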
-
-// Visitors ...
-
-void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
-  ObjectMonitor* block = gBlockList;
-  ObjectMonitor* mid;
-  while (block) {
-    assert(block->object() == CHAINMARKER, "must be a block header");
-    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
-      mid = block + i;
-      oop object = (oop) mid->object();
-      if (object != NULL) {
-        closure->do_monitor(mid);
-      }
-    }
-    block = (ObjectMonitor*) block->FreeNext;
-  }
-}
-
-void ObjectSynchronizer::oops_do(OopClosure* f) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
-    assert(block->object() == CHAINMARKER, "must be a block header");
-    for (int i = 1; i < _BLOCKSIZE; i++) {
-      ObjectMonitor* mid = &block[i];
-      if (mid->object() != NULL) {
-        f->do_oop((oop*)mid->object_addr());
-      }
-    }
-  }
-}
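monitors_iterate() and oops_do() above walk a singly linked list of monitor blocks: slot 0 of each block is a chain header (tagged with CHAINMARKER) and slots 1.._BLOCKSIZE-1 hold usable monitors. A minimal sketch of that layout with invented names (kBlockSize, MiniMonitor) follows:

#include <cstdio>

static const int kBlockSize = 4;                   // stands in for _BLOCKSIZE

struct MiniMonitor {
  void*        object;                             // NULL when the slot is unused
  MiniMonitor* free_next;                          // chain pointer in the block header
};

// Visit every in-use monitor in every block of the chain.
static void iterate(MiniMonitor* block, void (*visit)(MiniMonitor*)) {
  for (; block != NULL; block = block->free_next) {
    for (int i = 1; i < kBlockSize; i++) {         // slot 0 is the header
      MiniMonitor* mid = &block[i];
      if (mid->object != NULL) visit(mid);
    }
  }
}

static void print_one(MiniMonitor* m) { std::printf("in-use monitor %p\n", (void*) m); }

int main() {
  MiniMonitor block[kBlockSize] = {};              // one block; header in slot 0
  int payload = 0;
  block[2].object = &payload;                      // mark slot 2 as in use
  iterate(block, print_one);
  return 0;
}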
 
 // Deflate_idle_monitors() is called at all safepoints, immediately
 // after all mutators are stopped, but before any objects have moved.
@@ -1936,12 +1357,11 @@
 // which in turn can mean large(r) numbers of objectmonitors in circulation.
 // This is an unfortunate aspect of this design.
 //
-// Another refinement would be to refrain from calling deflate_idle_monitors()
-// except at stop-the-world points associated with garbage collections.
-//
-// An even better solution would be to deflate on-the-fly, aggressively,
-// at monitorexit-time as is done in EVM's metalock or Relaxed Locks.
 
+enum ManifestConstants {
+    ClearResponsibleAtSTW   = 0,
+    MaximumRecheckInterval  = 1000
+} ;
 
 // Deflate a single monitor if not in use
 // Return true if deflated, false if in use
@@ -2088,7 +1508,7 @@
 
   // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
 
-  if (Knob_Verbose) {
+  if (ObjectMonitor::Knob_Verbose) {
     ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
         nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
         MonitorPopulation, MonitorFreeCount) ;
@@ -2107,8 +1527,8 @@
   }
   Thread::muxRelease (&ListLock) ;
 
-  if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
-  if (_sync_MonExtant  != NULL) _sync_MonExtant ->set_value(nInCirculation);
+  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ;
+  if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
 
   // TODO: Add objectMonitor leak detection.
   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
@@ -2116,2810 +1536,49 @@
   GVars.stwCycle ++ ;
 }
 
-// A macro is used below because there may already be a pending
-// exception which should not abort the execution of the routines
-// which use this (which is why we don't put this into check_slow and
-// call it with a CHECK argument).
-
-#define CHECK_OWNER()                                                             \
-  do {                                                                            \
-    if (THREAD != _owner) {                                                       \
-      if (THREAD->is_lock_owned((address) _owner)) {                              \
-        _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
-        _recursions = 0;                                                          \
-        OwnerIsThread = 1 ;                                                       \
-      } else {                                                                    \
-        TEVENT (Throw IMSX) ;                                                     \
-        THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
-      }                                                                           \
-    }                                                                             \
-  } while (false)
-
-// TODO-FIXME: eliminate ObjectWaiters.  Replace this visitor/enumerator
-// interface with a simple FirstWaitingThread(), NextWaitingThread() interface.
-
-ObjectWaiter* ObjectMonitor::first_waiter() {
-  return _WaitSet;
-}
-
-ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) {
-  return o->_next;
-}
-
-Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) {
-  return o->_thread;
-}
-
-// Initialize the monitor; except for the semaphore, all other fields
-// are simple integers or pointers.
-ObjectMonitor::ObjectMonitor() {
-  _header       = NULL;
-  _count        = 0;
-  _waiters      = 0,
-  _recursions   = 0;
-  _object       = NULL;
-  _owner        = NULL;
-  _WaitSet      = NULL;
-  _WaitSetLock  = 0 ;
-  _Responsible  = NULL ;
-  _succ         = NULL ;
-  _cxq          = NULL ;
-  FreeNext      = NULL ;
-  _EntryList    = NULL ;
-  _SpinFreq     = 0 ;
-  _SpinClock    = 0 ;
-  OwnerIsThread = 0 ;
-}
-
-ObjectMonitor::~ObjectMonitor() {
-   // TODO: Add asserts ...
-   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
-   // _count == 0 _EntryList  == NULL etc
-}
+// Monitor cleanup on JavaThread::exit
 
-intptr_t ObjectMonitor::is_busy() const {
-  // TODO-FIXME: merge _count and _waiters.
-  // TODO-FIXME: assert _owner == null implies _recursions = 0
-  // TODO-FIXME: assert _WaitSet != null implies _count > 0
-  return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
-}
-
-void ObjectMonitor::Recycle () {
-  // TODO: add stronger asserts ...
-  // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
-  // _count == 0 EntryList  == NULL
-  // _recursions == 0 _WaitSet == NULL
-  // TODO: assert (is_busy()|_recursions) == 0
-  _succ          = NULL ;
-  _EntryList     = NULL ;
-  _cxq           = NULL ;
-  _WaitSet       = NULL ;
-  _recursions    = 0 ;
-  _SpinFreq      = 0 ;
-  _SpinClock     = 0 ;
-  OwnerIsThread  = 0 ;
-}
-
-// WaitSet management ...
+// Iterate through monitor cache and attempt to release thread's monitors
+// Gives up on a particular monitor if an exception occurs, but continues
+// the overall iteration, swallowing the exception.
+class ReleaseJavaMonitorsClosure: public MonitorClosure {
+private:
+  TRAPS;
 
-inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
-  assert(node != NULL, "should not add NULL node");
-  assert(node->_prev == NULL, "node already in list");
-  assert(node->_next == NULL, "node already in list");
-  // put node at end of queue (circular doubly linked list)
-  if (_WaitSet == NULL) {
-    _WaitSet = node;
-    node->_prev = node;
-    node->_next = node;
-  } else {
-    ObjectWaiter* head = _WaitSet ;
-    ObjectWaiter* tail = head->_prev;
-    assert(tail->_next == head, "invariant check");
-    tail->_next = node;
-    head->_prev = node;
-    node->_next = head;
-    node->_prev = tail;
-  }
-}
-
-inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
-  // dequeue the very first waiter
-  ObjectWaiter* waiter = _WaitSet;
-  if (waiter) {
-    DequeueSpecificWaiter(waiter);
-  }
-  return waiter;
-}
-
-inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
-  assert(node != NULL, "should not dequeue NULL node");
-  assert(node->_prev != NULL, "node already removed from list");
-  assert(node->_next != NULL, "node already removed from list");
-  // when the waiter has woken up because of interrupt,
-  // timeout or other spurious wake-up, dequeue the
-  // waiter from waiting list
-  ObjectWaiter* next = node->_next;
-  if (next == node) {
-    assert(node->_prev == node, "invariant check");
-    _WaitSet = NULL;
-  } else {
-    ObjectWaiter* prev = node->_prev;
-    assert(prev->_next == node, "invariant check");
-    assert(next->_prev == node, "invariant check");
-    next->_prev = prev;
-    prev->_next = next;
-    if (_WaitSet == node) {
-      _WaitSet = next;
+public:
+  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
+  void do_monitor(ObjectMonitor* mid) {
+    if (mid->owner() == THREAD) {
+      (void)mid->complete_exit(CHECK);
     }
   }
-  node->_next = NULL;
-  node->_prev = NULL;
-}
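AddWaiter() and the dequeue routines above keep the wait set as a circular doubly linked list headed by _WaitSet, appending at the tail and removing at the head. A self-contained sketch of the same structure (simplified node type; no _WaitSetLock shown):

#include <cassert>
#include <cstddef>

struct Waiter {
  Waiter* prev;
  Waiter* next;
  Waiter() : prev(NULL), next(NULL) {}
};

static Waiter* gWaitSet = NULL;            // head of the circular list

static void add_waiter(Waiter* node) {
  assert(node->prev == NULL && node->next == NULL);
  if (gWaitSet == NULL) {                  // first element points at itself
    gWaitSet = node;
    node->prev = node->next = node;
  } else {                                 // splice in just before the head
    Waiter* head = gWaitSet;
    Waiter* tail = head->prev;
    tail->next = node;  node->prev = tail;
    node->next = head;  head->prev = node;
  }
}

static Waiter* dequeue_waiter() {
  Waiter* node = gWaitSet;
  if (node == NULL) return NULL;
  if (node->next == node) {                // it was the only element
    gWaitSet = NULL;
  } else {
    node->prev->next = node->next;
    node->next->prev = node->prev;
    gWaitSet = node->next;
  }
  node->prev = node->next = NULL;
  return node;
}

int main() {
  Waiter a, b;
  add_waiter(&a);
  add_waiter(&b);
  assert(dequeue_waiter() == &a);
  assert(dequeue_waiter() == &b);
  assert(dequeue_waiter() == NULL);
  return 0;
}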
-
-static char * kvGet (char * kvList, const char * Key) {
-    if (kvList == NULL) return NULL ;
-    size_t n = strlen (Key) ;
-    char * Search ;
-    for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
-        if (strncmp (Search, Key, n) == 0) {
-            if (Search[n] == '=') return Search + n + 1 ;
-            if (Search[n] == 0)   return (char *) "1" ;
-        }
-    }
-    return NULL ;
-}
-
-static int kvGetInt (char * kvList, const char * Key, int Default) {
-    char * v = kvGet (kvList, Key) ;
-    int rslt = v ? ::strtol (v, NULL, 0) : Default ;
-    if (Knob_ReportSettings && v != NULL) {
-        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
-        ::fflush (stdout) ;
-    }
-    return rslt ;
-}
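kvGet()/kvGetInt() above look keys up in a buffer of NUL-separated "Key=Value" entries terminated by a double NUL (DeferredInitialize(), later in this diff, produces that buffer by rewriting ':' separators to NUL). A standalone sketch of the same lookup; the knob names in main() are just example input:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// kvList is a sequence of C strings packed back to back and ended by an
// empty string: "SpinLimit=5000\0Verbose\0\0".  A bare "Key" means "1".
static const char* kv_get(const char* kvList, const char* key) {
  size_t n = std::strlen(key);
  for (const char* s = kvList; *s; s += std::strlen(s) + 1) {
    if (std::strncmp(s, key, n) == 0) {
      if (s[n] == '=') return s + n + 1;
      if (s[n] == 0)   return "1";
    }
  }
  return NULL;
}

static int kv_get_int(const char* kvList, const char* key, int deflt) {
  const char* v = kv_get(kvList, key);
  return v ? (int) std::strtol(v, NULL, 0) : deflt;
}

int main() {
  // The explicit '\0' plus the string literal's own terminator give the
  // double-NUL ending that the scan relies on.
  static const char knobs[] = "SpinLimit=5000\0Verbose\0";
  std::printf("SpinLimit=%d Verbose=%d Missing=%d\n",
              kv_get_int(knobs, "SpinLimit", 0),
              kv_get_int(knobs, "Verbose", 0),
              kv_get_int(knobs, "Missing", -1));
  return 0;
}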
-
-// By convention we unlink a contending thread from EntryList|cxq immediately
-// after the thread acquires the lock in ::enter().  Equally, we could defer
-// unlinking the thread until ::exit()-time.
-
-void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
-{
-    assert (_owner == Self, "invariant") ;
-    assert (SelfNode->_thread == Self, "invariant") ;
-
-    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
-        // Normal case: remove Self from the DLL EntryList .
-        // This is a constant-time operation.
-        ObjectWaiter * nxt = SelfNode->_next ;
-        ObjectWaiter * prv = SelfNode->_prev ;
-        if (nxt != NULL) nxt->_prev = prv ;
-        if (prv != NULL) prv->_next = nxt ;
-        if (SelfNode == _EntryList ) _EntryList = nxt ;
-        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
-        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
-        TEVENT (Unlink from EntryList) ;
-    } else {
-        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
-        // Inopportune interleaving -- Self is still on the cxq.
-        // This usually means the enqueue of self raced an exiting thread.
-        // Normally we'll find Self near the front of the cxq, so
-        // dequeueing is typically fast.  If need be we can accelerate
-        // this with some MCS/CHL-like bidirectional list hints and advisory
-        // back-links so dequeueing from the interior will normally operate
-        // in constant-time.
-        // Dequeue Self from either the head (with CAS) or from the interior
-        // with a linear-time scan and normal non-atomic memory operations.
-        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
-        // and then unlink Self from EntryList.  We have to drain eventually,
-        // so it might as well be now.
-
-        ObjectWaiter * v = _cxq ;
-        assert (v != NULL, "invariant") ;
-        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
-            // The CAS above can fail from interference IFF a "RAT" arrived.
-            // In that case Self must be in the interior and can no longer be
-            // at the head of cxq.
-            if (v == SelfNode) {
-                assert (_cxq != v, "invariant") ;
-                v = _cxq ;          // CAS above failed - start scan at head of list
-            }
-            ObjectWaiter * p ;
-            ObjectWaiter * q = NULL ;
-            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
-                q = p ;
-                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
-            }
-            assert (v != SelfNode,  "invariant") ;
-            assert (p == SelfNode,  "Node not found on cxq") ;
-            assert (p != _cxq,      "invariant") ;
-            assert (q != NULL,      "invariant") ;
-            assert (q->_next == p,  "invariant") ;
-            q->_next = p->_next ;
-        }
-        TEVENT (Unlink from cxq) ;
-    }
-
-    // Diagnostic hygiene ...
-    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
-    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
-    SelfNode->TState = ObjectWaiter::TS_RUN ;
-}
-
-// Caveat: TryLock() is not necessarily serializing if it returns failure.
-// Callers must compensate as needed.
-
-int ObjectMonitor::TryLock (Thread * Self) {
-   for (;;) {
-      void * own = _owner ;
-      if (own != NULL) return 0 ;
-      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
-         // Either guarantee _recursions == 0 or set _recursions = 0.
-         assert (_recursions == 0, "invariant") ;
-         assert (_owner == Self, "invariant") ;
-         // CONSIDER: set or assert that OwnerIsThread == 1
-         return 1 ;
-      }
-      // The lock had been free momentarily, but we lost the race to the lock.
-      // Interference -- the CAS failed.
-      // We can either return -1 or retry.
-      // Retry doesn't make as much sense because the lock was just acquired.
-      if (true) return -1 ;
-   }
-}
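TryLock() above is a bare compare-and-swap on _owner: return 0 if the lock is visibly held, 1 if the CAS installs Self, and -1 if the CAS loses a race. An equivalent standalone version over an atomic owner pointer (simplified; no OwnerIsThread bookkeeping):

#include <atomic>
#include <cassert>
#include <cstddef>

static std::atomic<void*> gOwner(NULL);

// 1 = acquired, 0 = lock was visibly held, -1 = lock looked free but the
// CAS lost the race to another thread.
static int try_lock(void* self) {
  void* own = gOwner.load(std::memory_order_relaxed);
  if (own != NULL) return 0;
  void* expected = NULL;
  if (gOwner.compare_exchange_strong(expected, self, std::memory_order_acquire)) {
    return 1;
  }
  return -1;
}

static void unlock(void* self) {
  assert(gOwner.load() == self);
  gOwner.store(NULL, std::memory_order_release);
}

int main() {
  int me = 0, other = 0;
  assert(try_lock(&me) == 1);
  assert(try_lock(&other) == 0);   // visibly held by "me"
  unlock(&me);
  return 0;
}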
-
-// NotRunnable() -- informed spinning
-//
-// Don't bother spinning if the owner is not eligible to drop the lock.
-// Peek at the owner's schedctl.sc_state and Thread._thread_values and
-// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
-// The thread must be runnable in order to drop the lock in timely fashion.
-// If the _owner is not runnable then spinning will not likely be
-// successful (profitable).
-//
-// Beware -- the thread referenced by _owner could have died
-// so a simply fetch from _owner->_thread_state might trap.
-// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
-// Because of the lifecycle issues the schedctl and _thread_state values
-// observed by NotRunnable() might be garbage.  NotRunnable must
-// tolerate this and consider the observed _thread_state value
-// as advisory.
-//
-// Beware too, that _owner is sometimes a BasicLock address and sometimes
-// a thread pointer.  We differentiate the two cases with OwnerIsThread.
-// Alternately, we might tag the type (thread pointer vs basiclock pointer)
-// with the LSB of _owner.  Another option would be to probabilistically probe
-// the putative _owner->TypeTag value.
-//
-// Checking _thread_state isn't perfect.  Even if the thread is
-// in_java it might be blocked on a page-fault or have been preempted
-// and sitting on a ready/dispatch queue.  _thread state in conjunction
-// with schedctl.sc_state gives us a good picture of what the
-// thread is doing, however.
-//
-// TODO: check schedctl.sc_state.
-// We'll need to use SafeFetch32() to read from the schedctl block.
-// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
-//
-// The return value from NotRunnable() is *advisory* -- the
-// result is based on sampling and is not necessarily coherent.
-// The caller must tolerate false-negative and false-positive errors.
-// Spinning, in general, is probabilistic anyway.
-
-
-int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
-    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
-    if (!OwnerIsThread) return 0 ;
-
-    if (ox == NULL) return 0 ;
-
-    // Avoid transitive spinning ...
-    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
-    // Immediately after T1 acquires L it's possible that T2, also
-    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
-    // This occurs transiently after T1 acquired L but before
-    // T1 managed to clear T1.Stalled.  T2 does not need to abort
-    // its spin in this circumstance.
-    intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
-
-    if (BlockedOn == 1) return 1 ;
-    if (BlockedOn != 0) {
-      return BlockedOn != intptr_t(this) && _owner == ox ;
-    }
-
-    assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
-    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
-    // consider also: jst != _thread_in_Java -- but that's overspecific.
-    return jst == _thread_blocked || jst == _thread_in_native ;
-}
-
-
-// Adaptive spin-then-block - rational spinning
-//
-// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
-// algorithm.  On high order SMP systems it would be better to start with
-// a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
-// a contending thread could enqueue itself on the cxq and then spin locally
-// on a thread-specific variable such as its ParkEvent._Event flag.
-// That's left as an exercise for the reader.  Note that global spinning is
-// not problematic on Niagara, as the L2$ serves the interconnect and has both
-// low latency and massive bandwidth.
-//
-// Broadly, we can fix the spin frequency -- that is, the % of contended lock
-// acquisition attempts where we opt to spin --  at 100% and vary the spin count
-// (duration) or we can fix the count at approximately the duration of
-// a context switch and vary the frequency.   Of course we could also
-// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
-// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
-//
-// This implementation varies the duration "D", where D varies with
-// the success rate of recent spin attempts. (D is capped at approximately
-// length of a round-trip context switch).  The success rate for recent
-// spin attempts is a good predictor of the success rate of future spin
-// attempts.  The mechanism adapts automatically to varying critical
-// section length (lock modality), system load and degree of parallelism.
-// D is maintained per-monitor in _SpinDuration and is initialized
-// optimistically.  Spin frequency is fixed at 100%.
-//
-// Note that _SpinDuration is volatile, but we update it without locks
-// or atomics.  The code is designed so that _SpinDuration stays within
-// a reasonable range even in the presence of races.  The arithmetic
-// operations on _SpinDuration are closed over the domain of legal values,
-// so at worst a race will install an older but still legal value.
-// At the very worst this introduces some apparent non-determinism.
-// We might spin when we shouldn't or vice-versa, but since the spin
-// counts are relatively short, even in the worst case, the effect is harmless.
-//
-// Care must be taken that a low "D" value does not become
-// an absorbing state.  Transient spinning failures -- when spinning
-// is overall profitable -- should not cause the system to converge
-// on low "D" values.  We want spinning to be stable and predictable
-// and fairly responsive to change and at the same time we don't want
-// it to oscillate, become metastable, be "too" non-deterministic,
-// or converge on or enter undesirable stable absorbing states.
-//
-// We implement a feedback-based control system -- using past behavior
-// to predict future behavior.  We face two issues: (a) if the
-// input signal is random then the spin predictor won't provide optimal
-// results, and (b) if the signal frequency is too high then the control
-// system, which has some natural response lag, will "chase" the signal.
-// (b) can arise from multimodal lock hold times.  Transient preemption
-// can also result in apparent bimodal lock hold times.
-// Although sub-optimal, neither condition is particularly harmful, as
-// in the worst-case we'll spin when we shouldn't or vice-versa.
-// The maximum spin duration is rather short so the failure modes aren't bad.
-// To be conservative, I've tuned the gain in the system to bias toward
-// _not spinning.  Relatedly, the system can sometimes enter a mode where it
-// "rings" or oscillates between spinning and not spinning.  This happens
-// when spinning is just on the cusp of profitability, however, so the
-// situation is not dire.  The state is benign -- there's no need to add
-// hysteresis control to damp the transition rate between spinning and
-// not spinning.
-//
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-//
-// Spin-then-block strategies ...
-//
-// Thoughts on ways to improve spinning :
-//
-// *  Periodically call {psr_}getloadavg() while spinning, and
-//    permit unbounded spinning if the load average is <
-//    the number of processors.  Beware, however, that getloadavg()
-//    is exceptionally fast on solaris (about 1/10 the cost of a full
-//    spin cycle) but quite expensive on linux.  Beware also that
-//    multiple JVMs could "ring" or oscillate in a feedback loop.
-//    Sufficient damping would solve that problem.
-//
-// *  We currently use spin loops with iteration counters to approximate
-//    spinning for some interval.  Given the availability of high-precision
-//    time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should
-//    someday reimplement the spin loops to be duration-based instead of iteration-based.
-//
-// *  Don't spin if there are more than N = (CPUs/2) threads
-//        currently spinning on the monitor (or globally).
-//    That is, limit the number of concurrent spinners.
-//    We might also limit the # of spinners in the JVM, globally.
-//
-// *  If a spinning thread observes _owner change hands it should
-//    abort the spin (and park immediately) or at least debit
-//    the spin counter by a large "penalty".
-//
-// *  Classically, the spin count is either K*(CPUs-1) or is a
-//        simple constant that approximates the length of a context switch.
-//    We currently use a value -- computed by a special utility -- that
-//    approximates round-trip context switch times.
-//
-// *  Normally schedctl_start()/_stop() is used to advise the kernel
-//    to avoid preempting threads that are running in short, bounded
-//    critical sections.  We could use the schedctl hooks in an inverted
-//    sense -- spinners would set the nopreempt flag, but poll the preempt
-//    pending flag.  If a spinner observed a pending preemption it'd immediately
-//    abort the spin and park.   As such, the schedctl service acts as
-//    a preemption warning mechanism.
-//
-// *  In lieu of spinning, if the system is running below saturation
-//    (that is, loadavg() << #cpus), we can instead suppress futile
-//    wakeup throttling, or even wake more than one successor at exit-time.
-//    The net effect is largely equivalent to spinning.  In both cases,
-//    contending threads go ONPROC and opportunistically attempt to acquire
-//    the lock, decreasing lock handover latency at the expense of wasted
-//    cycles and context switching.
-//
-// *  We might want to spin less after we've parked, as the thread will
-//    have less $ and TLB affinity with the processor.
-//    Likewise, we might spin less if we come ONPROC on a different
-//    processor or after a long period (>> rechose_interval).
-//
-// *  A table-driven state machine similar to Solaris' dispadmin scheduling
-//    tables might be a better design.  Instead of encoding information in
-//    _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit,
-//    discrete states.   Success or failure during a spin would drive
-//    state transitions, and each state node would contain a spin count.
-//
-// *  If the processor is operating in a mode intended to conserve power
-//    (such as Intel's SpeedStep) or to reduce thermal output (thermal
-//    step-down mode) then the Java synchronization subsystem should
-//    forgo spinning.
-//
-// *  The minimum spin duration should be approximately the worst-case
-//    store propagation latency on the platform.  That is, the time
-//    it takes a store on CPU A to become visible on CPU B, where A and
-//    B are "distant".
-//
-// *  We might want to factor a thread's priority in the spin policy.
-//    Threads with a higher priority might spin for slightly longer.
-//    Similarly, if we use back-off in the TATAS loop, lower priority
-//    threads might back-off longer.  We don't currently use a
-//    thread's priority when placing it on the entry queue.  We may
-//    want to consider doing so in future releases.
-//
-// *  We might transiently drop a thread's scheduling priority while it spins.
-//    SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris
-//    would suffice.  We could even consider letting the thread spin indefinitely at
-//    a depressed or "idle" priority.  This brings up fairness issues, however --
-//    in a saturated system a thread with a reduced priority could languish
-//    for extended periods on the ready queue.
-//
-// *  While spinning try to use the otherwise wasted time to help the VM make
-//    progress:
-//
-//    -- YieldTo() the owner, if the owner is OFFPROC but ready
-//       Donate our remaining quantum directly to the ready thread.
-//       This helps "push" the lock owner through the critical section.
-//       It also tends to improve affinity/locality as the lock
-//       "migrates" less frequently between CPUs.
-//    -- Walk our own stack in anticipation of blocking.  Memoize the roots.
-//    -- Perform strand checking for other thread.  Unpark potential strandees.
-//    -- Help GC: trace or mark -- this would need to be a bounded unit of work.
-//       Unfortunately this will pollute our $ and TLBs.  Recall that we
-//       spin to avoid context switching -- context switching has an
-//       immediate cost in latency, a disruptive cost to other strands on a CMT
-//       processor, and an amortized cost because of the D$ and TLB cache
-//       reload transient when the thread comes back ONPROC and repopulates
-//       $s and TLBs.
-//    -- call getloadavg() to see if the system is saturated.  It'd probably
-//       make sense to call getloadavg() half way through the spin.
-//       If the system isn't at full capacity then we'd simply reset
-//       the spin counter and extend the spin attempt.
-//    -- Doug points out that we should use the same "helping" policy
-//       in thread.yield().
-//
-// *  Try MONITOR-MWAIT on systems that support those instructions.
-//
-// *  The spin statistics that drive spin decisions & frequency are
-//    maintained in the objectmonitor structure so if we deflate and reinflate
-//    we lose spin state.  In practice this is not usually a concern
-//    as the default spin state after inflation is aggressive (optimistic)
-//    and tends toward spinning.  So in the worst case for a lock where
-//    spinning is not profitable we may spin unnecessarily for a brief
-//    period.  But then again, if a lock is contended it'll tend not to deflate
-//    in the first place.
-
-
-intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
-int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
-
-// Spinning: Fixed frequency (100%), vary duration
-
-int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
-
-    // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
-    int ctr = Knob_FixedSpin ;
-    if (ctr != 0) {
-        while (--ctr >= 0) {
-            if (TryLock (Self) > 0) return 1 ;
-            SpinPause () ;
-        }
-        return 0 ;
-    }
-
-    for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
-      if (TryLock(Self) > 0) {
-        // Increase _SpinDuration ...
-        // Note that we don't clamp SpinDuration precisely at SpinLimit.
-        // Raising _SpinDuration to the poverty line is key.
-        int x = _SpinDuration ;
-        if (x < Knob_SpinLimit) {
-           if (x < Knob_Poverty) x = Knob_Poverty ;
-           _SpinDuration = x + Knob_BonusB ;
-        }
-        return 1 ;
-      }
-      SpinPause () ;
-    }
-
-    // Admission control - verify preconditions for spinning
-    //
-    // We always spin a little bit, just to prevent _SpinDuration == 0 from
-    // becoming an absorbing state.  Put another way, we spin briefly to
-    // sample, just in case the system load, parallelism, contention, or lock
-    // modality changed.
-    //
-    // Consider the following alternative:
-    // Periodically set _SpinDuration = _SpinLimit and try a long/full
-    // spin attempt.  "Periodically" might mean after a tally of
-    // the # of failed spin attempts (or iterations) reaches some threshold.
-    // This takes us into the realm of 1-out-of-N spinning, where we
-    // hold the duration constant but vary the frequency.
-
-    ctr = _SpinDuration  ;
-    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
-    if (ctr <= 0) return 0 ;
-
-    if (Knob_SuccRestrict && _succ != NULL) return 0 ;
-    if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
-       TEVENT (Spin abort - notrunnable [TOP]);
-       return 0 ;
-    }
-
-    int MaxSpin = Knob_MaxSpinners ;
-    if (MaxSpin >= 0) {
-       if (_Spinner > MaxSpin) {
-          TEVENT (Spin abort -- too many spinners) ;
-          return 0 ;
-       }
-       // Slightly racy, but benign ...
-       Adjust (&_Spinner, 1) ;
-    }
-
-    // We're good to spin ... spin ingress.
-    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
-    // when preparing to LD...CAS _owner, etc and the CAS is likely
-    // to succeed.
-    int hits    = 0 ;
-    int msk     = 0 ;
-    int caspty  = Knob_CASPenalty ;
-    int oxpty   = Knob_OXPenalty ;
-    int sss     = Knob_SpinSetSucc ;
-    if (sss && _succ == NULL ) _succ = Self ;
-    Thread * prv = NULL ;
-
-    // There are three ways to exit the following loop:
-    // 1.  A successful spin where this thread has acquired the lock.
-    // 2.  Spin failure with prejudice
-    // 3.  Spin failure without prejudice
-
-    while (--ctr >= 0) {
-
-      // Periodic polling -- Check for pending GC
-      // Threads may spin while they're unsafe.
-      // We don't want spinning threads to delay the JVM from reaching
-      // a stop-the-world safepoint or to steal cycles from GC.
-      // If we detect a pending safepoint we abort in order that
-      // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
-      // this thread, if safe, doesn't steal cycles from GC.
-      // This is in keeping with the "no loitering in runtime" rule.
-      // We periodically check to see if there's a safepoint pending.
-      if ((ctr & 0xFF) == 0) {
-         if (SafepointSynchronize::do_call_back()) {
-            TEVENT (Spin: safepoint) ;
-            goto Abort ;           // abrupt spin egress
-         }
-         if (Knob_UsePause & 1) SpinPause () ;
-
-         int (*scb)(intptr_t,int) = SpinCallbackFunction ;
-         if (hits > 50 && scb != NULL) {
-            int abend = (*scb)(SpinCallbackArgument, 0) ;
-         }
-      }
-
-      if (Knob_UsePause & 2) SpinPause() ;
-
-      // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
-      // This is useful on classic SMP systems, but is of less utility on
-      // N1-style CMT platforms.
-      //
-      // Trade-off: lock acquisition latency vs coherency bandwidth.
-      // Lock hold times are typically short.  A histogram
-      // of successful spin attempts shows that we usually acquire
-      // the lock early in the spin.  That suggests we want to
-      // sample _owner frequently in the early phase of the spin,
-      // but then back-off and sample less frequently as the spin
-      // progresses.  The back-off makes us a good citizen on big
-      // SMP systems.  Oversampling _owner can consume excessive
-      // coherency bandwidth.  Relatedly, if we oversample _owner we
-      // can inadvertently interfere with the ST m->owner=null
-      // executed by the lock owner.
-      if (ctr & msk) continue ;
-      ++hits ;
-      if ((hits & 0xF) == 0) {
-        // The 0xF, above, corresponds to the exponent.
-        // Consider: (msk+1)|msk
-        msk = ((msk << 2)|3) & BackOffMask ;
-      }
-
-      // Probe _owner with TATAS
-      // If this thread observes the monitor transition or flicker
-      // from locked to unlocked to locked, then the odds that this
-      // thread will acquire the lock in this spin attempt go down
-      // considerably.  The same argument applies if the CAS fails
-      // or if we observe _owner change from one non-null value to
-      // another non-null value.   In such cases we might abort
-      // the spin without prejudice or apply a "penalty" to the
-      // spin count-down variable "ctr", reducing it by 100, say.
-
-      Thread * ox = (Thread *) _owner ;
-      if (ox == NULL) {
-         ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
-         if (ox == NULL) {
-            // The CAS succeeded -- this thread acquired ownership
-            // Take care of some bookkeeping to exit spin state.
-            if (sss && _succ == Self) {
-               _succ = NULL ;
-            }
-            if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
-
-            // Increase _SpinDuration :
-            // The spin was successful (profitable) so we tend toward
-            // longer spin attempts in the future.
-            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
-            // If we acquired the lock early in the spin cycle it
-            // makes sense to increase _SpinDuration proportionally.
-            // Note that we don't clamp SpinDuration precisely at SpinLimit.
-            int x = _SpinDuration ;
-            if (x < Knob_SpinLimit) {
-                if (x < Knob_Poverty) x = Knob_Poverty ;
-                _SpinDuration = x + Knob_Bonus ;
-            }
-            return 1 ;
-         }
-
-         // The CAS failed ... we can take any of the following actions:
-         // * penalize: ctr -= Knob_CASPenalty
-         // * exit spin with prejudice -- goto Abort;
-         // * exit spin without prejudice.
-         // * Since CAS is high-latency, retry immediately.
-         prv = ox ;
-         TEVENT (Spin: cas failed) ;
-         if (caspty == -2) break ;
-         if (caspty == -1) goto Abort ;
-         ctr -= caspty ;
-         continue ;
-      }
-
-      // Did lock ownership change hands ?
-      if (ox != prv && prv != NULL ) {
-          TEVENT (spin: Owner changed)
-          if (oxpty == -2) break ;
-          if (oxpty == -1) goto Abort ;
-          ctr -= oxpty ;
-      }
-      prv = ox ;
-
-      // Abort the spin if the owner is not executing.
-      // The owner must be executing in order to drop the lock.
-      // Spinning while the owner is OFFPROC is idiocy.
-      // Consider: ctr -= RunnablePenalty ;
-      if (Knob_OState && NotRunnable (Self, ox)) {
-         TEVENT (Spin abort - notrunnable);
-         goto Abort ;
-      }
-      if (sss && _succ == NULL ) _succ = Self ;
-   }
-
-   // Spin failed with prejudice -- reduce _SpinDuration.
-   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
-   // AIMD is globally stable.
-   TEVENT (Spin failure) ;
-   {
-     int x = _SpinDuration ;
-     if (x > 0) {
-        // Consider an AIMD scheme like: x -= (x >> 3) + 100
-        // This is globally stable and tends to damp the response.
-        x -= Knob_Penalty ;
-        if (x < 0) x = 0 ;
-        _SpinDuration = x ;
-     }
-   }
-
- Abort:
-   if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
-   if (sss && _succ == Self) {
-      _succ = NULL ;
-      // Invariant: after setting succ=null a contending thread
-      // must recheck-retry _owner before parking.  This usually happens
-      // in the normal usage of TrySpin(), but it's safest
-      // to make TrySpin() as foolproof as possible.
-      OrderAccess::fence() ;
-      if (TryLock(Self) > 0) return 1 ;
-   }
-   return 0 ;
-}
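The spin loop above is a bounded TATAS probe of _owner combined with an exponential back-off mask, so _owner is sampled frequently early in the spin and progressively less often as the spin ages. Below is a minimal standalone sketch of that pattern, assuming C++11 std::atomic and std::thread in place of HotSpot's Atomic and Thread primitives; BackoffSpinLock, try_spin and back_off_mask are illustrative names, not part of the VM.

#include <atomic>
#include <thread>

// Illustrative sketch only -- not the HotSpot implementation.
class BackoffSpinLock {
  std::atomic<std::thread::id> owner_{std::thread::id()};

 public:
  // Bounded TATAS spin: return true if the lock is acquired within 'ctr'
  // iterations, false if the spin attempt fails and the caller should park.
  bool try_spin(int ctr, unsigned back_off_mask = 0xFF) {
    unsigned msk = 0;                    // current back-off mask
    int hits = 0;                        // probes that were not skipped
    const std::thread::id self = std::this_thread::get_id();
    const std::thread::id none{};        // default id == "unowned"

    while (--ctr >= 0) {
      // Stay off the bus: probe only when the low bits of ctr clear the mask.
      if (ctr & msk) continue;
      if ((++hits & 0xF) == 0) {
        msk = ((msk << 2) | 3) & back_off_mask;   // widen the back-off window
      }
      // Test ... then test-and-set only if the lock looks free.
      if (owner_.load(std::memory_order_relaxed) == none) {
        std::thread::id expected = none;
        if (owner_.compare_exchange_strong(expected, self,
                                           std::memory_order_acquire)) {
          return true;                   // spin succeeded
        }
      }
    }
    return false;                        // spin failed
  }

  void unlock() { owner_.store(std::thread::id(), std::memory_order_release); }
};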
-
-#define TrySpin TrySpin_VaryDuration
-
-static void DeferredInitialize () {
-  if (InitDone > 0) return ;
-  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
-      while (InitDone != 1) ;
-      return ;
-  }
-
-  // One-shot global initialization ...
-  // The initialization is idempotent, so we don't need locks.
-  // In the future consider doing this via os::init_2().
-  // SyncKnobs consist of <Key>=<Value> pairs in the style
-  // of environment variables.  Start by converting ':' to NUL.
-
-  if (SyncKnobs == NULL) SyncKnobs = "" ;
-
-  size_t sz = strlen (SyncKnobs) ;
-  char * knobs = (char *) malloc (sz + 2) ;
-  if (knobs == NULL) {
-     vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
-     guarantee (0, "invariant") ;
-  }
-  strcpy (knobs, SyncKnobs) ;
-  knobs[sz+1] = 0 ;
-  for (char * p = knobs ; *p ; p++) {
-     if (*p == ':') *p = 0 ;
-  }
-
-  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
-  SETKNOB(ReportSettings) ;
-  SETKNOB(Verbose) ;
-  SETKNOB(FixedSpin) ;
-  SETKNOB(SpinLimit) ;
-  SETKNOB(SpinBase) ;
-  SETKNOB(SpinBackOff);
-  SETKNOB(CASPenalty) ;
-  SETKNOB(OXPenalty) ;
-  SETKNOB(LogSpins) ;
-  SETKNOB(SpinSetSucc) ;
-  SETKNOB(SuccEnabled) ;
-  SETKNOB(SuccRestrict) ;
-  SETKNOB(Penalty) ;
-  SETKNOB(Bonus) ;
-  SETKNOB(BonusB) ;
-  SETKNOB(Poverty) ;
-  SETKNOB(SpinAfterFutile) ;
-  SETKNOB(UsePause) ;
-  SETKNOB(SpinEarly) ;
-  SETKNOB(OState) ;
-  SETKNOB(MaxSpinners) ;
-  SETKNOB(PreSpin) ;
-  SETKNOB(ExitPolicy) ;
-  SETKNOB(QMode);
-  SETKNOB(ResetEvent) ;
-  SETKNOB(MoveNotifyee) ;
-  SETKNOB(FastHSSEC) ;
-  #undef SETKNOB
-
-  if (os::is_MP()) {
-     BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
-     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
-     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
-  } else {
-     Knob_SpinLimit = 0 ;
-     Knob_SpinBase  = 0 ;
-     Knob_PreSpin   = 0 ;
-     Knob_FixedSpin = -1 ;
-  }
-
-  if (Knob_LogSpins == 0) {
-     ObjectSynchronizer::_sync_FailedSpins = NULL ;
-  }
-
-  free (knobs) ;
-  OrderAccess::fence() ;
-  InitDone = 1 ;
-}
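DeferredInitialize() treats SyncKnobs as a ':'-separated list of Key=Value pairs, splits the copy in place by turning ':' into NUL (leaving a double-NUL terminator), and then looks each key up with kvGetInt(). The sketch below reproduces that parse-and-lookup idea with standard C library calls only; kv_get_int is a hypothetical stand-in for kvGetInt(), whose exact behavior is not shown in this diff.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Stand-in for kvGetInt(): scan a buffer of NUL-separated "Key=Value"
// entries (terminated by an empty entry) and return the integer value
// for Key, or 'dflt' if the key is absent.
static int kv_get_int(const char* kvlist, const char* key, int dflt) {
  size_t klen = strlen(key);
  for (const char* p = kvlist; *p != '\0'; p += strlen(p) + 1) {
    if (strncmp(p, key, klen) == 0 && p[klen] == '=') {
      return atoi(p + klen + 1);
    }
  }
  return dflt;
}

int main() {
  // Same preparation as above: copy the knob string, then turn ':' into NUL
  // so the buffer becomes a sequence of NUL-terminated entries ending with
  // a double NUL.
  const char* sync_knobs = "SpinLimit=5000:UsePause=1";
  size_t sz = strlen(sync_knobs);
  char* knobs = (char*)malloc(sz + 2);
  if (knobs == NULL) return 1;
  strcpy(knobs, sync_knobs);
  knobs[sz + 1] = 0;
  for (char* p = knobs; *p; p++) {
    if (*p == ':') *p = 0;
  }

  printf("SpinLimit=%d UsePause=%d FixedSpin=%d\n",
         kv_get_int(knobs, "SpinLimit", 0),
         kv_get_int(knobs, "UsePause", 0),
         kv_get_int(knobs, "FixedSpin", -1));
  free(knobs);
  return 0;
}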
-
-// Theory of operations -- Monitors lists, thread residency, etc:
-//
-// * A thread acquires ownership of a monitor by successfully
-//   CAS()ing the _owner field from null to non-null.
-//
-// * Invariant: A thread appears on at most one monitor list --
-//   cxq, EntryList or WaitSet -- at any one time.
-//
-// * Contending threads "push" themselves onto the cxq with CAS
-//   and then spin/park.
-//
-// * After a contending thread eventually acquires the lock it must
-//   dequeue itself from either the EntryList or the cxq.
-//
-// * The exiting thread identifies and unparks an "heir presumptive"
-//   tentative successor thread on the EntryList.  Critically, the
-//   exiting thread doesn't unlink the successor thread from the EntryList.
-//   After having been unparked, the wakee will recontend for ownership of
-//   the monitor.   The successor (wakee) will either acquire the lock or
-//   re-park itself.
-//
-//   Succession is provided for by a policy of competitive handoff.
-//   The exiting thread does _not_ grant or pass ownership to the
-//   successor thread.  (This is also referred to as "handoff" succession.)
-//   Instead the exiting thread releases ownership and possibly wakes
-//   a successor, so the successor can (re)compete for ownership of the lock.
-//   If the EntryList is empty but the cxq is populated the exiting
-//   thread will drain the cxq into the EntryList.  It does so
-//   by detaching the cxq (installing null with CAS) and folding
-//   the threads from the cxq into the EntryList.  The EntryList is
-//   doubly linked, while the cxq is singly linked because of the
-//   CAS-based "push" used to enqueue recently arrived threads (RATs).
-//
-// * Concurrency invariants:
-//
-//   -- only the monitor owner may access or mutate the EntryList.
-//      The mutex property of the monitor itself protects the EntryList
-//      from concurrent interference.
-//   -- Only the monitor owner may detach the cxq.
-//
-// * The monitor entry list operations avoid locks, but strictly speaking
-//   they're not lock-free.  Enter is lock-free, exit is not.
-//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
-//
-// * The cxq can have multiple concurrent "pushers" but only one concurrent
-//   detaching thread.  This mechanism is immune to ABA corruption.
-//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
-//
-// * Taken together, the cxq and the EntryList constitute a
-//   single logical queue of threads stalled trying to acquire the lock.
-//   We use two distinct lists to improve the odds of a constant-time
-//   dequeue operation after acquisition (in the ::enter() epilog) and
-//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
-//   A key desideratum is to minimize queue & monitor metadata manipulation
-//   that occurs while holding the monitor lock -- that is, we want to
-//   minimize monitor lock hold times.  Note that even a small amount of
-//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
-//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
-//   locks and monitor metadata.
-//
-//   Cxq points to the set of Recently Arrived Threads attempting entry.
-//   Because we push threads onto _cxq with CAS, the RATs must take the form of
-//   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
-//   the unlocking thread notices that EntryList is null but _cxq is != null.
-//
-//   The EntryList is ordered by the prevailing queue discipline and
-//   can be organized in any convenient fashion, such as a doubly-linked list or
-//   a circular doubly-linked list.  Critically, we want insert and delete operations
-//   to operate in constant-time.  If we need a priority queue then something akin
-//   to Solaris' sleepq would work nicely.  Viz.,
-//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
-//   Queue discipline is enforced at ::exit() time, when the unlocking thread
-//   drains the cxq into the EntryList, and orders or reorders the threads on the
-//   EntryList accordingly.
-//
-//   Barring "lock barging", this mechanism provides fair cyclic ordering,
-//   somewhat similar to an elevator-scan.
-//
-// * The monitor synchronization subsystem avoids the use of native
-//   synchronization primitives except for the narrow platform-specific
-//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
-//   the semantics of park-unpark.  Put another way, this monitor implementation
-//   depends only on atomic operations and park-unpark.  The monitor subsystem
-//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
-//   underlying OS manages the READY<->RUN transitions.
-//
-// * Waiting threads reside on the WaitSet list -- wait() puts
-//   the caller onto the WaitSet.
-//
-// * notify() or notifyAll() simply transfers threads from the WaitSet to
-//   either the EntryList or cxq.  Subsequent exit() operations will
-//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
-//   it's likely the notifyee would simply impale itself on the lock held
-//   by the notifier.
-//
-// * An interesting alternative is to encode cxq as (List,LockByte) where
-//   the LockByte is non-zero iff the monitor is owned.  _owner is simply an auxiliary
-//   variable, like _recursions, in the scheme.  The threads or Events that form
-//   the list would have to be aligned on 256-byte boundaries.  A thread would
-//   try to acquire the lock or enqueue itself with CAS, but exiting threads
-//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
-//   Note that this is *not* word-tearing, but it does presume that full-word
-//   CAS operations are coherent when intermixed with STB operations.  That's true
-//   on most common processors.
-//
-// * See also http://blogs.sun.com/dave
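The cxq described above is a singly linked LIFO: arriving threads push themselves with CAS, and only the lock owner ever detaches the whole chain, which is why the push is ABA-oblivious. A minimal sketch of that push/detach pair follows, assuming std::atomic and a plain Node type as stand-ins for ObjectWaiter and the monitor's _cxq field.

#include <atomic>

struct Node {           // stand-in for ObjectWaiter
  Node* next = nullptr;
  int   payload = 0;
};

class CxqStyleStack {
  std::atomic<Node*> head_{nullptr};

 public:
  // Many concurrent pushers: CAS the new node onto the front, retrying on
  // interference.  (In EnterI() the retry loop also re-attempts TryLock.)
  void push(Node* n) {
    Node* old = head_.load(std::memory_order_relaxed);
    do {
      n->next = old;
    } while (!head_.compare_exchange_weak(old, n,
                                          std::memory_order_release,
                                          std::memory_order_relaxed));
  }

  // Exactly one detacher at a time (the lock owner): swap the whole chain
  // out with a single exchange.  Because the entire list is taken at once,
  // the usual ABA hazards of a lock-free pop() do not arise.
  Node* detach_all() {
    return head_.exchange(nullptr, std::memory_order_acquire);
  }
};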
-
-
-void ATTR ObjectMonitor::EnterI (TRAPS) {
-    Thread * Self = THREAD ;
-    assert (Self->is_Java_thread(), "invariant") ;
-    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
-
-    // Try the lock - TATAS
-    if (TryLock (Self) > 0) {
-        assert (_succ != Self              , "invariant") ;
-        assert (_owner == Self             , "invariant") ;
-        assert (_Responsible != Self       , "invariant") ;
-        return ;
-    }
-
-    DeferredInitialize () ;
-
-    // We try one round of spinning *before* enqueueing Self.
-    //
-    // If the _owner is ready but OFFPROC we could use a YieldTo()
-    // operation to donate the remainder of this thread's quantum
-    // to the owner.  This has subtle but beneficial affinity
-    // effects.
-
-    if (TrySpin (Self) > 0) {
-        assert (_owner == Self        , "invariant") ;
-        assert (_succ != Self         , "invariant") ;
-        assert (_Responsible != Self  , "invariant") ;
-        return ;
-    }
-
-    // The Spin failed -- Enqueue and park the thread ...
-    assert (_succ  != Self            , "invariant") ;
-    assert (_owner != Self            , "invariant") ;
-    assert (_Responsible != Self      , "invariant") ;
-
-    // Enqueue "Self" on ObjectMonitor's _cxq.
-    //
-    // Node acts as a proxy for Self.
-    // As an aside, if we were ever to rewrite the synchronization code mostly
-    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
-    // Java objects.  This would avoid awkward lifecycle and liveness issues,
-    // as well as eliminate a subset of ABA issues.
-    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
-    //
-
-    ObjectWaiter node(Self) ;
-    Self->_ParkEvent->reset() ;
-    node._prev   = (ObjectWaiter *) 0xBAD ;
-    node.TState  = ObjectWaiter::TS_CXQ ;
-
-    // Push "Self" onto the front of the _cxq.
-    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
-    // Note that spinning tends to reduce the rate at which threads
-    // enqueue and dequeue on EntryList|cxq.
-    ObjectWaiter * nxt ;
-    for (;;) {
-        node._next = nxt = _cxq ;
-        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
-
-        // Interference - the CAS failed because _cxq changed.  Just retry.
-        // As an optional optimization we retry the lock.
-        if (TryLock (Self) > 0) {
-            assert (_succ != Self         , "invariant") ;
-            assert (_owner == Self        , "invariant") ;
-            assert (_Responsible != Self  , "invariant") ;
-            return ;
-        }
-    }
-
-    // Check for cxq|EntryList edge transition to non-null.  This indicates
-    // the onset of contention.  While contention persists exiting threads
-    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
-    // operations revert to the faster 1-0 mode.  This enter operation may interleave
-    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
-    // arrange for one of the contending threads to use a timed park() operation
-    // to detect and recover from the race.  (Stranding is a form of progress failure
-    // where the monitor is unlocked but all the contending threads remain parked).
-    // That is, at least one of the contended threads will periodically poll _owner.
-    // One of the contending threads will become the designated "Responsible" thread.
-    // The Responsible thread uses a timed park instead of a normal indefinite park
-    // operation -- it periodically wakes and checks for and recovers from potential
-    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
-    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
-    // be responsible for a monitor.
-    //
-    // Currently, one of the contended threads takes on the added role of "Responsible".
-    // A viable alternative would be to use a dedicated "stranding checker" thread
-    // that periodically iterated over all the threads (or active monitors) and unparked
-    // successors where there was risk of stranding.  This would help eliminate the
-    // timer scalability issues we see on some platforms as we'd only have one thread
-    // -- the checker -- parked on a timer.
-
-    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
-        // Try to assume the role of responsible thread for the monitor.
-        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
-        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
-    }
-
-    // The lock might have been released while this thread was occupied queueing
-    // itself onto _cxq.  To close the race and avoid "stranding" and
-    // progress-liveness failure we must resample-retry _owner before parking.
-    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
-    // In this case the ST-MEMBAR is accomplished with CAS().
-    //
-    // TODO: Defer all thread state transitions until park-time.
-    // Since state transitions are heavy and inefficient we'd like
-    // to defer the state transitions until absolutely necessary,
-    // and in doing so avoid some transitions ...
-
-    TEVENT (Inflated enter - Contention) ;
-    int nWakeups = 0 ;
-    int RecheckInterval = 1 ;
-
-    for (;;) {
-
-        if (TryLock (Self) > 0) break ;
-        assert (_owner != Self, "invariant") ;
-
-        if ((SyncFlags & 2) && _Responsible == NULL) {
-           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
-        }
-
-        // park self
-        if (_Responsible == Self || (SyncFlags & 1)) {
-            TEVENT (Inflated enter - park TIMED) ;
-            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
-            // Increase the RecheckInterval, but clamp the value.
-            RecheckInterval *= 8 ;
-            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
-        } else {
-            TEVENT (Inflated enter - park UNTIMED) ;
-            Self->_ParkEvent->park() ;
-        }
-
-        if (TryLock(Self) > 0) break ;
-
-        // The lock is still contested.
-        // Keep a tally of the # of futile wakeups.
-        // Note that the counter is not protected by a lock or updated by atomics.
-        // That is by design - we trade "lossy" counters which are exposed to
-        // races during updates for a lower probe effect.
-        TEVENT (Inflated enter - Futile wakeup) ;
-        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
-           ObjectSynchronizer::_sync_FutileWakeups->inc() ;
-        }
-        ++ nWakeups ;
-
-        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
-        // We can defer clearing _succ until after the spin completes
-        // TrySpin() must tolerate being called with _succ == Self.
-        // Try yet another round of adaptive spinning.
-        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
-
-        // We can find that we were unpark()ed and redesignated _succ while
-        // we were spinning.  That's harmless.  If we iterate and call park(),
-        // park() will consume the event and return immediately and we'll
-        // just spin again.  This pattern can repeat, leaving _succ to simply
-        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
-        // Alternately, we can sample fired() here, and if set, forgo spinning
-        // in the next iteration.
-
-        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
-           Self->_ParkEvent->reset() ;
-           OrderAccess::fence() ;
-        }
-        if (_succ == Self) _succ = NULL ;
-
-        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
-        OrderAccess::fence() ;
-    }
-
-    // Egress :
-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
-    // Normally we'll find Self on the EntryList .
-    // From the perspective of the lock owner (this thread), the
-    // EntryList is stable and cxq is prepend-only.
-    // The head of cxq is volatile but the interior is stable.
-    // In addition, Self.TState is stable.
-
-    assert (_owner == Self      , "invariant") ;
-    assert (object() != NULL    , "invariant") ;
-    // I'd like to write:
-    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
-    // but as we're at a safepoint that's not safe.
-
-    UnlinkAfterAcquire (Self, &node) ;
-    if (_succ == Self) _succ = NULL ;
-
-    assert (_succ != Self, "invariant") ;
-    if (_Responsible == Self) {
-        _Responsible = NULL ;
-        // Dekker pivot-point.
-        // Consider OrderAccess::storeload() here
-
-        // We may leave threads on cxq|EntryList without a designated
-        // "Responsible" thread.  This is benign.  When this thread subsequently
-        // exits the monitor it can "see" such preexisting "old" threads --
-        // threads that arrived on the cxq|EntryList before the fence, above --
-        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
-        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
-        // non-null and elect a new "Responsible" timer thread.
-        //
-        // This thread executes:
-        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
-        //    LD cxq|EntryList               (in subsequent exit)
-        //
-        // Entering threads in the slow/contended path execute:
-        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
-        //    The (ST cxq; MEMBAR) is accomplished with CAS().
-        //
-        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
-        // exit operation from floating above the ST Responsible=null.
-        //
-        // In *practice* however, EnterI() is always followed by some atomic
-        // operation such as the decrement of _count in ::enter().  Those atomics
-        // obviate the need for the explicit MEMBAR, above.
-    }
-
-    // We've acquired ownership with CAS().
-    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
-    // But since the CAS() this thread may have also stored into _succ,
-    // EntryList, cxq or Responsible.  These meta-data updates must be
-    // visible __before this thread subsequently drops the lock.
-    // Consider what could occur if we didn't enforce this constraint --
-    // STs to monitor meta-data and user-data could reorder with (become
-    // visible after) the ST in exit that drops ownership of the lock.
-    // Some other thread could then acquire the lock, but observe inconsistent
-    // or old monitor meta-data and heap data.  That violates the JMM.
-    // To that end, the 1-0 exit() operation must have at least STST|LDST
-    // "release" barrier semantics.  Specifically, there must be at least a
-    // STST|LDST barrier in exit() before the ST of null into _owner that drops
-    // the lock.   The barrier ensures that changes to monitor meta-data and data
-    // protected by the lock will be visible before we release the lock, and
-    // therefore before some other thread (CPU) has a chance to acquire the lock.
-    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
-    //
-    // Critically, any prior STs to _succ or EntryList must be visible before
-    // the ST of null into _owner in the *subsequent* (following) corresponding
-    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
-    // execute a serializing instruction.
-
-    if (SyncFlags & 8) {
-       OrderAccess::fence() ;
-    }
-    return ;
-}
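EnterI() relies on the Dekker/Lamport duality noted above: the entering thread does ST cxq; MEMBAR; LD _owner (the CAS that pushes onto _cxq supplies the ST and the MEMBAR), while the exiting thread does ST _owner=null; MEMBAR; LD cxq|EntryList. The sketch below models that handshake with two std::atomic<bool> flags; queued and owned are illustrative stand-ins, not the monitor fields.

#include <atomic>

// Illustrative sketch of the enter/exit handshake, not the HotSpot code.
std::atomic<bool> queued{false};   // stands in for "cxq|EntryList is non-empty"
std::atomic<bool> owned{true};     // stands in for _owner != NULL

// Contended enter path: publish ourselves, then re-sample the lock before
// parking.  The full fence supplies the store-load ordering that a plain
// release/acquire pairing would not; in EnterI() the CAS that pushes onto
// _cxq plays this role.
bool lock_still_held_after_enqueue() {
  queued.store(true, std::memory_order_release);         // ST cxq
  std::atomic_thread_fence(std::memory_order_seq_cst);   // MEMBAR
  return owned.load(std::memory_order_relaxed);          // LD owner
}

// Exit path: drop the lock, then re-sample the queue to decide whether a
// successor must be woken.  With both sides fenced, at least one of the two
// threads is guaranteed to observe the other, which closes the stranding race.
bool waiters_present_after_release() {
  owned.store(false, std::memory_order_release);         // ST owner = null
  std::atomic_thread_fence(std::memory_order_seq_cst);   // MEMBAR (storeload)
  return queued.load(std::memory_order_relaxed);         // LD cxq|EntryList
}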
-
-// ExitSuspendEquivalent:
-// A faster alternate to handle_special_suspend_equivalent_condition()
-//
-// handle_special_suspend_equivalent_condition() unconditionally
-// acquires the SR_lock.  On some platforms uncontended MutexLocker()
-// operations have high latency.  Note that in ::enter() we call HSSEC
-// while holding the monitor, so we effectively lengthen the critical sections.
-//
-// There are a number of possible solutions:
-//
-// A.  To ameliorate the problem we might also defer state transitions
-//     to as late as possible -- just prior to parking.
-//     Given that, we'd call HSSEC after having returned from park(),
-//     but before attempting to acquire the monitor.  This is only a
-//     partial solution.  It avoids calling HSSEC while holding the
-//     monitor (good), but it still increases successor reacquisition latency --
-//     the interval between unparking a successor and the time the successor
-//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
-//     If we use this technique we can also avoid EnterI()-exit() loop
-//     in ::enter() where we iteratively drop the lock and then attempt
-//     to reacquire it after suspending.
-//
-// B.  In the future we might fold all the suspend bits into a
-//     composite per-thread suspend flag and then update it with CAS().
-//     Alternately, a Dekker-like mechanism with multiple variables
-//     would suffice:
-//       ST Self->_suspend_equivalent = false
-//       MEMBAR
-//       LD Self->_suspend_flags
-//
-
-
-bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
-   int Mode = Knob_FastHSSEC ;
-   if (Mode && !jSelf->is_external_suspend()) {
-      assert (jSelf->is_suspend_equivalent(), "invariant") ;
-      jSelf->clear_suspend_equivalent() ;
-      if (2 == Mode) OrderAccess::storeload() ;
-      if (!jSelf->is_external_suspend()) return false ;
-      // We raced a suspension -- fall thru into the slow path
-      TEVENT (ExitSuspendEquivalent - raced) ;
-      jSelf->set_suspend_equivalent() ;
-   }
-   return jSelf->handle_special_suspend_equivalent_condition() ;
-}
-
-
-// ReenterI() is a specialized inline form of the latter half of the
-// contended slow-path from EnterI().  We use ReenterI() only for
-// monitor reentry in wait().
-//
-// In the future we should reconcile EnterI() and ReenterI(), adding
-// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
-// loop accordingly.
-
-void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
-    assert (Self != NULL                , "invariant") ;
-    assert (SelfNode != NULL            , "invariant") ;
-    assert (SelfNode->_thread == Self   , "invariant") ;
-    assert (_waiters > 0                , "invariant") ;
-    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
-    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
-    JavaThread * jt = (JavaThread *) Self ;
-
-    int nWakeups = 0 ;
-    for (;;) {
-        ObjectWaiter::TStates v = SelfNode->TState ;
-        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
-        assert    (_owner != Self, "invariant") ;
-
-        if (TryLock (Self) > 0) break ;
-        if (TrySpin (Self) > 0) break ;
-
-        TEVENT (Wait Reentry - parking) ;
-
-        // State transition wrappers around park() ...
-        // ReenterI() wisely defers state transitions until
-        // it's clear we must park the thread.
-        {
-           OSThreadContendState osts(Self->osthread());
-           ThreadBlockInVM tbivm(jt);
-
-           // cleared by handle_special_suspend_equivalent_condition()
-           // or java_suspend_self()
-           jt->set_suspend_equivalent();
-           if (SyncFlags & 1) {
-              Self->_ParkEvent->park ((jlong)1000) ;
-           } else {
-              Self->_ParkEvent->park () ;
-           }
-
-           // were we externally suspended while we were waiting?
-           for (;;) {
-              if (!ExitSuspendEquivalent (jt)) break ;
-              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
-              jt->java_suspend_self();
-              jt->set_suspend_equivalent();
-           }
-        }
-
-        // Try again, but just so we distinguish between futile wakeups and
-        // successful wakeups.  The following test isn't algorithmically
-        // necessary, but it helps us maintain sensible statistics.
-        if (TryLock(Self) > 0) break ;
-
-        // The lock is still contested.
-        // Keep a tally of the # of futile wakeups.
-        // Note that the counter is not protected by a lock or updated by atomics.
-        // That is by design - we trade "lossy" counters which are exposed to
-        // races during updates for a lower probe effect.
-        TEVENT (Wait Reentry - futile wakeup) ;
-        ++ nWakeups ;
-
-        // Assuming this is not a spurious wakeup we'll normally
-        // find that _succ == Self.
-        if (_succ == Self) _succ = NULL ;
-
-        // Invariant: after clearing _succ a contending thread
-        // *must* retry  _owner before parking.
-        OrderAccess::fence() ;
-
-        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
-          ObjectSynchronizer::_sync_FutileWakeups->inc() ;
-        }
-    }
-
-    // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
-    // Normally we'll find Self on the EntryList.
-    // Unlinking from the EntryList is constant-time and atomic-free.
-    // From the perspective of the lock owner (this thread), the
-    // EntryList is stable and cxq is prepend-only.
-    // The head of cxq is volatile but the interior is stable.
-    // In addition, Self.TState is stable.
-
-    assert (_owner == Self, "invariant") ;
-    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
-    UnlinkAfterAcquire (Self, SelfNode) ;
-    if (_succ == Self) _succ = NULL ;
-    assert (_succ != Self, "invariant") ;
-    SelfNode->TState = ObjectWaiter::TS_RUN ;
-    OrderAccess::fence() ;      // see comments at the end of EnterI()
-}
-
-bool ObjectMonitor::try_enter(Thread* THREAD) {
-  if (THREAD != _owner) {
-    if (THREAD->is_lock_owned ((address)_owner)) {
-       assert(_recursions == 0, "internal state error");
-       _owner = THREAD ;
-       _recursions = 1 ;
-       OwnerIsThread = 1 ;
-       return true;
-    }
-    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-      return false;
-    }
-    return true;
-  } else {
-    _recursions++;
-    return true;
-  }
-}
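try_enter() is the non-blocking acquire: a current owner just bumps _recursions, otherwise a single CAS on _owner either succeeds or the call reports failure without queueing. A small sketch of that shape as a recursive try-lock over std::atomic is shown below; the stack-locked (BasicLock) case is deliberately omitted, and RecursiveTryLock is an illustrative name.

#include <atomic>
#include <cassert>
#include <thread>

class RecursiveTryLock {
  std::atomic<std::thread::id> owner_{std::thread::id()};
  int recursions_ = 0;          // only touched by the owner

 public:
  bool try_enter() {
    const std::thread::id self = std::this_thread::get_id();
    if (owner_.load(std::memory_order_relaxed) == self) {
      ++recursions_;            // recursive re-entry by the current owner
      return true;
    }
    std::thread::id expected{};  // default id == "no owner"
    return owner_.compare_exchange_strong(expected, self,
                                          std::memory_order_acquire);
  }

  void exit() {
    assert(owner_.load(std::memory_order_relaxed) ==
           std::this_thread::get_id());
    if (recursions_ > 0) {
      --recursions_;            // simple recursive exit
      return;
    }
    owner_.store(std::thread::id(), std::memory_order_release);
  }
};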
-
-void ATTR ObjectMonitor::enter(TRAPS) {
-  // The following code is ordered to check the most common cases first
-  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
-  Thread * const Self = THREAD ;
-  void * cur ;
-
-  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
-  if (cur == NULL) {
-     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
-     assert (_recursions == 0   , "invariant") ;
-     assert (_owner      == Self, "invariant") ;
-     // CONSIDER: set or assert OwnerIsThread == 1
-     return ;
-  }
-
-  if (cur == Self) {
-     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
-     _recursions ++ ;
-     return ;
-  }
-
-  if (Self->is_lock_owned ((address)cur)) {
-    assert (_recursions == 0, "internal state error");
-    _recursions = 1 ;
-    // Commute owner from a thread-specific on-stack BasicLockObject address to
-    // a full-fledged "Thread *".
-    _owner = Self ;
-    OwnerIsThread = 1 ;
-    return ;
-  }
-
-  // We've encountered genuine contention.
-  assert (Self->_Stalled == 0, "invariant") ;
-  Self->_Stalled = intptr_t(this) ;
-
-  // Try one round of spinning *before* enqueueing Self
-  // and before going through the awkward and expensive state
-  // transitions.  The following spin is strictly optional ...
-  // Note that if we acquire the monitor from an initial spin
-  // we forgo posting JVMTI events and firing DTRACE probes.
-  if (Knob_SpinEarly && TrySpin (Self) > 0) {
-     assert (_owner == Self      , "invariant") ;
-     assert (_recursions == 0    , "invariant") ;
-     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
-     Self->_Stalled = 0 ;
-     return ;
-  }
-
-  assert (_owner != Self          , "invariant") ;
-  assert (_succ  != Self          , "invariant") ;
-  assert (Self->is_Java_thread()  , "invariant") ;
-  JavaThread * jt = (JavaThread *) Self ;
-  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
-  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
-  assert (this->object() != NULL  , "invariant") ;
-  assert (_count >= 0, "invariant") ;
-
-  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
-  // Ensure the object-monitor relationship remains stable while there's contention.
-  Atomic::inc_ptr(&_count);
-
-  { // Change java thread status to indicate blocked on monitor enter.
-    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
-
-    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
-    if (JvmtiExport::should_post_monitor_contended_enter()) {
-      JvmtiExport::post_monitor_contended_enter(jt, this);
-    }
-
-    OSThreadContendState osts(Self->osthread());
-    ThreadBlockInVM tbivm(jt);
-
-    Self->set_current_pending_monitor(this);
-
-    // TODO-FIXME: change the following for(;;) loop to straight-line code.
-    for (;;) {
-      jt->set_suspend_equivalent();
-      // cleared by handle_special_suspend_equivalent_condition()
-      // or java_suspend_self()
-
-      EnterI (THREAD) ;
-
-      if (!ExitSuspendEquivalent(jt)) break ;
-
-      //
-      // We have acquired the contended monitor, but while we were
-      // waiting another thread suspended us. We don't want to enter
-      // the monitor while suspended because that would surprise the
-      // thread that suspended us.
-      //
-      _recursions = 0 ;
-      _succ = NULL ;
-      exit (Self) ;
-
-      jt->java_suspend_self();
-    }
-    Self->set_current_pending_monitor(NULL);
-  }
-
-  Atomic::dec_ptr(&_count);
-  assert (_count >= 0, "invariant") ;
-  Self->_Stalled = 0 ;
-
-  // Must either set _recursions = 0 or ASSERT _recursions == 0.
-  assert (_recursions == 0     , "invariant") ;
-  assert (_owner == Self       , "invariant") ;
-  assert (_succ  != Self       , "invariant") ;
-  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
-
-  // The thread -- now the owner -- is back in vm mode.
-  // Report the glorious news via TI,DTrace and jvmstat.
-  // The probe effect is non-trivial.  All the reportage occurs
-  // while we hold the monitor, increasing the length of the critical
-  // section.  Amdahl's parallel speedup law comes vividly into play.
-  //
-  // Another option might be to aggregate the events (thread local or
-  // per-monitor aggregation) and defer reporting until a more opportune
-  // time -- such as next time some thread encounters contention but has
-  // yet to acquire the lock.  While that thread is spinning we
-  // could increment JVMStat counters, etc.
-
-  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
-  if (JvmtiExport::should_post_monitor_contended_entered()) {
-    JvmtiExport::post_monitor_contended_entered(jt, this);
-  }
-  if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
-     ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
-  }
-}
-
-void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
-   assert (_owner == Self, "invariant") ;
-
-   // Exit protocol:
-   // 1. ST _succ = wakee
-   // 2. membar #loadstore|#storestore;
-   // 3. ST _owner = NULL
-   // 4. unpark(wakee)
-
-   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
-   ParkEvent * Trigger = Wakee->_event ;
+};
 
-   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
-   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
-   // out-of-scope (non-extant).
-   Wakee  = NULL ;
-
-   // Drop the lock
-   OrderAccess::release_store_ptr (&_owner, NULL) ;
-   OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
-
-   // TODO-FIXME:
-   // If there's a safepoint pending the best policy would be to
-   // get _this thread to a safepoint and only wake the successor
-   // after the safepoint completed.  monitorexit uses a "leaf"
-   // state transition, however, so this thread can't become
-   // safe at this point in time.  (Its stack isn't walkable).
-   // The next best thing is to defer waking the successor by
-   // adding it to a list of threads to be unparked at the
-   // end of the forthcoming STW.
-   if (SafepointSynchronize::do_call_back()) {
-      TEVENT (unpark before SAFEPOINT) ;
-   }
-
-   // Possible optimizations ...
-   //
-   // * Consider: set Wakee->UnparkTime = timeNow()
-   //   When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
-   //   By measuring recent ONPROC latency we can approximate the
-   //   system load.  In turn, we can feed that information back
-   //   into the spinning & succession policies.
-   //   (ONPROC latency correlates strongly with load).
-   //
-   // * Pull affinity:
-   //   If the wakee is cold then transiently setting its affinity
-   //   to the current CPU is a good idea.
-   //   See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
-   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
-   Trigger->unpark() ;
-
-   // Maintain stats and report events to JVMTI
-   if (ObjectSynchronizer::_sync_Parks != NULL) {
-      ObjectSynchronizer::_sync_Parks->inc() ;
-   }
-}
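ExitEpilog() follows a strict order: nominate the successor, stop touching the wakee, drop the lock with a release store, fence so the cleared _owner is ordered before anything unpark() does, and only then wake the successor. A hedged sketch of that ordering, with std::atomic fields and a condition-variable ParkEvent substitute standing in for the VM primitives:

#include <atomic>
#include <condition_variable>
#include <mutex>

// Condition-variable stand-in for a per-thread ParkEvent.
struct ParkEvent {
  std::mutex m;
  std::condition_variable cv;
  bool fired = false;
  void unpark() {
    { std::lock_guard<std::mutex> g(m); fired = true; }
    cv.notify_one();
  }
  void park() {
    std::unique_lock<std::mutex> l(m);
    cv.wait(l, [this] { return fired; });
    fired = false;
  }
};

struct Waiter { ParkEvent* event; };          // stand-in for ObjectWaiter

std::atomic<void*>   owner{nullptr};          // stands in for _owner
std::atomic<Waiter*> succ{nullptr};           // stands in for _succ

void exit_epilog(Waiter* wakee) {
  succ.store(wakee, std::memory_order_relaxed);        // 1. nominate the heir presumptive
  ParkEvent* trigger = wakee->event;                   //    capture before dropping the lock
  wakee = nullptr;                                     //    hygiene: wakee may vanish once owner clears

  owner.store(nullptr, std::memory_order_release);     // 2. drop the lock
  std::atomic_thread_fence(std::memory_order_seq_cst); // 3. order the ST of owner vs loads in unpark

  trigger->unpark();                                   // 4. wake the successor
}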
-
-
-// exit()
-// ~~~~~~
-// Note that the collector can't reclaim the objectMonitor or deflate
-// the object out from underneath the thread calling ::exit() as the
-// thread calling ::exit() never transitions to a stable state.
-// This inhibits GC, which in turn inhibits asynchronous (and
-// inopportune) reclamation of "this".
-//
-// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
-// There's one exception to the claim above, however.  EnterI() can call
-// exit() to drop a lock if the acquirer has been externally suspended.
-// In that case exit() is called with _thread_state as _thread_blocked,
-// but the monitor's _count field is > 0, which inhibits reclamation.
-//
-// 1-0 exit
-// ~~~~~~~~
-// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
-// the fast-path operators have been optimized so the common ::exit()
-// operation is 1-0.  See i486.ad fast_unlock(), for instance.
-// The code emitted by fast_unlock() elides the usual MEMBAR.  This
-// greatly improves latency -- MEMBAR and CAS having considerable local
-// latency on modern processors -- but at the cost of "stranding".  Absent the
-// MEMBAR, a thread in fast_unlock() can race a thread in the slow
-// ::enter() path, resulting in the entering thread being stranded
-// and a progress-liveness failure.   Stranding is extremely rare.
-// We use timers (timed park operations) & periodic polling to detect
-// and recover from stranding.  Potentially stranded threads periodically
-// wake up and poll the lock.  See the usage of the _Responsible variable.
-//
-// The CAS() in enter provides for safety and exclusion, while the CAS or
-// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
-// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
-// We detect and recover from stranding with timers.
+// Release all inflated monitors owned by THREAD.  Lightweight monitors are
+// ignored.  This is meant to be called during JNI thread detach which assumes
+// all remaining monitors are heavyweight.  All exceptions are swallowed.
+// Scanning the extant monitor list can be time consuming.
+// A simple optimization is to add a per-thread flag that indicates a thread
+// called jni_monitorenter() during its lifetime.
 //
-// If a thread transiently strands it'll park until (a) another
-// thread acquires the lock and then drops the lock, at which time the
-// exiting thread will notice and unpark the stranded thread, or, (b)
-// the timer expires.  If the lock is high traffic then the stranding latency
-// will be low due to (a).  If the lock is low traffic then the odds of
-// stranding are lower, although the worst-case stranding latency
-// is longer.  Critically, we don't want to put excessive load in the
-// platform's timer subsystem.  We want to minimize both the timer injection
-// rate (timers created/sec) as well as the number of timers active at
-// any one time.  (more precisely, we want to minimize timer-seconds, which is
-// the integral of the # of active timers at any instant over time).
-// Both impinge on OS scalability.  Given that, at most one thread parked on
-// a monitor will use a timer.
-
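Because the 1-0 exit admits stranding, recovery relies on timers: at most one contending thread (the Responsible thread) parks with a timeout and re-polls the lock, backing the interval off much like RecheckInterval in EnterI(). The sketch below shows only that waiter-side recovery loop, assuming a condition variable as the park/unpark substitute; StrandTolerantLock is an illustrative name, not VM code.

#include <algorithm>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

// Illustrative only: a lock whose waiters tolerate lost wakeups by using a
// timed wait, in the spirit of the Responsible thread's timed park.
struct StrandTolerantLock {
  std::atomic<bool> locked{false};
  std::mutex m;                        // protects the condition variable only
  std::condition_variable cv;

  bool try_acquire() {
    bool expected = false;
    return locked.compare_exchange_strong(expected, true,
                                          std::memory_order_acquire);
  }

  void acquire() {
    int recheck_ms = 1;
    while (!try_acquire()) {
      std::unique_lock<std::mutex> lk(m);
      // Timed park: even if the unlocker's wakeup is lost (the analogue of a
      // 1-0 exit racing a slow-path enter), we wake up and re-poll the lock.
      cv.wait_for(lk, std::chrono::milliseconds(recheck_ms));
      recheck_ms = std::min(recheck_ms * 8, 1000);   // back off, clamped
    }
  }

  void release() {
    locked.store(false, std::memory_order_release);  // 1-0 style drop
    cv.notify_one();                                  // best-effort wakeup
  }
};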
-void ATTR ObjectMonitor::exit(TRAPS) {
-   Thread * Self = THREAD ;
-   if (THREAD != _owner) {
-     if (THREAD->is_lock_owned((address) _owner)) {
-       // Transmute _owner from a BasicLock pointer to a Thread address.
-       // We don't need to hold _mutex for this transition.
-       // Non-null to Non-null is safe as long as all readers can
-       // tolerate either flavor.
-       assert (_recursions == 0, "invariant") ;
-       _owner = THREAD ;
-       _recursions = 0 ;
-       OwnerIsThread = 1 ;
-     } else {
-       // NOTE: we need to handle unbalanced monitor enter/exit
-       // in native code by throwing an exception.
-       // TODO: Throw an IllegalMonitorStateException ?
-       TEVENT (Exit - Throw IMSX) ;
-       assert(false, "Non-balanced monitor enter/exit!");
-       if (false) {
-          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
-       }
-       return;
-     }
-   }
-
-   if (_recursions != 0) {
-     _recursions--;        // this is simple recursive enter
-     TEVENT (Inflated exit - recursive) ;
-     return ;
-   }
-
-   // Invariant: after setting Responsible=null a thread must execute
-   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
-   if ((SyncFlags & 4) == 0) {
-      _Responsible = NULL ;
-   }
-
-   for (;;) {
-      assert (THREAD == _owner, "invariant") ;
-
-      // Fast-path monitor exit:
-      //
-      // Observe the Dekker/Lamport duality:
-      // A thread in ::exit() executes:
-      //   ST Owner=null; MEMBAR; LD EntryList|cxq.
-      // A thread in the contended ::enter() path executes the complementary:
-      //   ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
-      //
-      // Note that there's a benign race in the exit path.  We can drop the
-      // lock, another thread can reacquire the lock immediately, and we can
-      // then wake a thread unnecessarily (yet another flavor of futile wakeup).
-      // This is benign, and we've structured the code so the windows are short
-      // and the frequency of such futile wakeups is low.
-      //
-      // We could eliminate the race by encoding both the "LOCKED" state and
-      // the queue head in a single word.  Exit would then use CAS to
-      // clear the LOCKED bit/byte.  This precludes the desirable 1-0 optimization,
-      // however.
-      //
-      // Possible fast-path ::exit() optimization:
-      // The current fast-path exit implementation fetches both cxq and EntryList.
-      // See also i486.ad fast_unlock().  Testing has shown that two LDs
-      // isn't measurably slower than a single LD on any platform.
-      // Still, we could reduce the 2 LDs to one or zero by one of the following:
-      //
-      // - Use _count instead of cxq|EntryList
-      //   We intend to eliminate _count, however, when we switch
-      //   to on-the-fly deflation in ::exit() as is used in
-      //   Metalocks and RelaxedLocks.
-      //
-      // - Establish the invariant that cxq == null implies EntryList == null.
-      //   set cxq == EMPTY (1) to encode the state where cxq is empty
-      //   but EntryList != null.  EMPTY is a distinguished value.
-      //   The fast-path exit() would fetch cxq but not EntryList.
-      //
-      // - Encode succ as follows:
-      //   succ = t :  Thread t is the successor -- t is ready or is spinning.
-      //               Exiting thread does not need to wake a successor.
-      //   succ = 0 :  No successor required -> (EntryList|cxq) == null
-      //               Exiting thread does not need to wake a successor
-      //   succ = 1 :  Successor required    -> (EntryList|cxq) != null and
-      //               logically succ == null.
-      //               Exiting thread must wake a successor.
-      //
-      //   The 1-1 fast-exit path would appear as :
-      //     _owner = null ; membar ;
-      //     if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
-      //     goto FastPathDone ;
-      //
-      //   and the 1-0 fast-exit path would appear as:
-      //      if (_succ == 1) goto SlowPath
-      //      Owner = null ;
-      //      goto FastPathDone
-      //
-      // - Encode the LSB of _owner as 1 to indicate that exit()
-      //   must use the slow-path and make a successor ready.
-      //   (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null
-      //   (_owner & 1) == 1 IFF succ == null && (EntryList|cxq) != null (obviously)
-      //   The 1-0 fast exit path would read:
-      //      if (_owner != Self) goto SlowPath
-      //      _owner = null
-      //      goto FastPathDone
-
-      if (Knob_ExitPolicy == 0) {
-         // release semantics: prior loads and stores from within the critical section
-         // must not float (reorder) past the following store that drops the lock.
-         // On SPARC that requires MEMBAR #loadstore|#storestore.
-         // But of course in TSO #loadstore|#storestore is not required.
-         // I'd like to write one of the following:
-         // A.  OrderAccess::release() ; _owner = NULL
-         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
-         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
-         // store into a _dummy variable.  That store is not needed, but can result
-         // in massive wasteful coherency traffic on classic SMP systems.
-         // Instead, I use release_store(), which is implemented as just a simple
-         // ST on x64, x86 and SPARC.
-         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
-         OrderAccess::storeload() ;                         // See if we need to wake a successor
-         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-            TEVENT (Inflated exit - simple egress) ;
-            return ;
-         }
-         TEVENT (Inflated exit - complex egress) ;
-
-         // Normally the exiting thread is responsible for ensuring succession,
-         // but if other successors are ready or other entering threads are spinning
-         // then this thread can simply store NULL into _owner and exit without
-         // waking a successor.  The existence of spinners or ready successors
-         // guarantees proper succession (liveness).  Responsibility passes to the
-         // ready or running successors.  The exiting thread delegates the duty.
-         // More precisely, if a successor already exists this thread is absolved
-         // of the responsibility of waking (unparking) one.
-         //
-         // The _succ variable is critical to reducing futile wakeup frequency.
-         // _succ identifies the "heir presumptive" thread that has been made
-         // ready (unparked) but that has not yet run.  We need only one such
-         // successor thread to guarantee progress.
-         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
-         // section 3.3 "Futile Wakeup Throttling" for details.
-         //
-         // Note that spinners in Enter() also set _succ non-null.
-         // In the current implementation spinners opportunistically set
-         // _succ so that exiting threads might avoid waking a successor.
-         // Another less appealing alternative would be for the exiting thread
-         // to drop the lock and then spin briefly to see if a spinner managed
-         // to acquire the lock.  If so, the exiting thread could exit
-         // immediately without waking a successor, otherwise the exiting
-         // thread would need to dequeue and wake a successor.
-         // (Note that we'd need to make the post-drop spin short, but no
-         // shorter than the worst-case round-trip cache-line migration time.
-         // The dropped lock needs to become visible to the spinner, and then
-         // the acquisition of the lock by the spinner must become visible to
-         // the exiting thread).
-         //
-
-         // It appears that an heir-presumptive (successor) must be made ready.
-         // Only the current lock owner can manipulate the EntryList or
-         // drain _cxq, so we need to reacquire the lock.  If we fail
-         // to reacquire the lock the responsibility for ensuring succession
-         // falls to the new owner.
-         //
-         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-            return ;
-         }
-         TEVENT (Exit - Reacquired) ;
-      } else {
-         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
-            OrderAccess::storeload() ;
-            // Ratify the previously observed values.
-            if (_cxq == NULL || _succ != NULL) {
-                TEVENT (Inflated exit - simple egress) ;
-                return ;
-            }
-
-            // inopportune interleaving -- the exiting thread (this thread)
-            // in the fast-exit path raced an entering thread in the slow-enter
-            // path.
-            // We have two choices:
-            // A.  Try to reacquire the lock.
-            //     If the CAS() fails return immediately, otherwise
-            //     we either restart/rerun the exit operation, or simply
-            //     fall-through into the code below which wakes a successor.
-            // B.  If the elements forming the EntryList|cxq are TSM
-            //     we could simply unpark() the lead thread and return
-            //     without having set _succ.
-            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
-               TEVENT (Inflated exit - reacquired succeeded) ;
-               return ;
-            }
-            TEVENT (Inflated exit - reacquired failed) ;
-         } else {
-            TEVENT (Inflated exit - complex egress) ;
-         }
-      }
-
-      guarantee (_owner == THREAD, "invariant") ;
-
-      // Select an appropriate successor ("heir presumptive") from the EntryList
-      // and make it ready.  Generally we just wake the head of EntryList .
-      // There's no algorithmic constraint that we use the head - it's just
-      // a policy decision.   Note that the thread at head of the EntryList
-      // remains at the head until it acquires the lock.  This means we'll
-      // repeatedly wake the same thread until it manages to grab the lock.
-      // This is generally a good policy - if we're seeing lots of futile wakeups
-      // at least we're waking/rewaking a thread that's likely to be hot or warm
-      // (have residual D$ and TLB affinity).
-      //
-      // "Wakeup locality" optimization:
-      // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
-      // In the future we'll try to bias the selection mechanism
-      // to preferentially pick a thread that recently ran on
-      // a processor element that shares cache with the CPU on which
-      // the exiting thread is running.   We need access to Solaris'
-      // schedctl.sc_cpu to make that work.
-      //
-      ObjectWaiter * w = NULL ;
-      int QMode = Knob_QMode ;
-
-      if (QMode == 2 && _cxq != NULL) {
-          // QMode == 2 : cxq has precedence over EntryList.
-          // Try to directly wake a successor from the cxq.
-          // If successful, the successor will need to unlink itself from cxq.
-          w = _cxq ;
-          assert (w != NULL, "invariant") ;
-          assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
-          ExitEpilog (Self, w) ;
-          return ;
-      }
-
-      if (QMode == 3 && _cxq != NULL) {
-          // Aggressively drain cxq into EntryList at the first opportunity.
-          // This policy ensures that recently-run threads live at the head of EntryList.
-          // Drain _cxq into EntryList - bulk transfer.
-          // First, detach _cxq.
-          // The following loop is tantamount to: w = swap (&cxq, NULL)
-          w = _cxq ;
-          for (;;) {
-             assert (w != NULL, "Invariant") ;
-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
-             if (u == w) break ;
-             w = u ;
-          }
-          assert (w != NULL              , "invariant") ;
-
-          ObjectWaiter * q = NULL ;
-          ObjectWaiter * p ;
-          for (p = w ; p != NULL ; p = p->_next) {
-              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
-              p->TState = ObjectWaiter::TS_ENTER ;
-              p->_prev = q ;
-              q = p ;
-          }
-
-          // Append the RATs to the EntryList
-          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
-          ObjectWaiter * Tail ;
-          for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
-          if (Tail == NULL) {
-              _EntryList = w ;
-          } else {
-              Tail->_next = w ;
-              w->_prev = Tail ;
-          }
-
-          // Fall thru into code that tries to wake a successor from EntryList
-      }
-
-      if (QMode == 4 && _cxq != NULL) {
-          // Aggressively drain cxq into EntryList at the first opportunity.
-          // This policy ensures that recently-run threads live at the head of EntryList.
-
-          // Drain _cxq into EntryList - bulk transfer.
-          // First, detach _cxq.
-          // The following loop is tantamount to: w = swap (&cxq, NULL)
-          w = _cxq ;
-          for (;;) {
-             assert (w != NULL, "Invariant") ;
-             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
-             if (u == w) break ;
-             w = u ;
-          }
-          assert (w != NULL              , "invariant") ;
-
-          ObjectWaiter * q = NULL ;
-          ObjectWaiter * p ;
-          for (p = w ; p != NULL ; p = p->_next) {
-              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
-              p->TState = ObjectWaiter::TS_ENTER ;
-              p->_prev = q ;
-              q = p ;
-          }
-
-          // Prepend the RATs to the EntryList
-          if (_EntryList != NULL) {
-              q->_next = _EntryList ;
-              _EntryList->_prev = q ;
-          }
-          _EntryList = w ;
-
-          // Fall thru into code that tries to wake a successor from EntryList
-      }
-
-      w = _EntryList  ;
-      if (w != NULL) {
-          // I'd like to write: guarantee (w->_thread != Self).
-          // But in practice an exiting thread may find itself on the EntryList.
-          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
-          // then calls exit().  Exit releases the lock by setting O._owner to NULL.
-          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
-          // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
-          // releases the lock "O".  T1 resumes immediately after the ST of null into
-          // _owner, above.  T1 notices that the EntryList is populated, so it
-          // reacquires the lock and then finds itself on the EntryList.
-          // Given all that, we have to tolerate the circumstance where "w" is
-          // associated with Self.
-          assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
-          ExitEpilog (Self, w) ;
-          return ;
-      }
-
-      // If we find that both _cxq and EntryList are null then just
-      // re-run the exit protocol from the top.
-      w = _cxq ;
-      if (w == NULL) continue ;
-
-      // Drain _cxq into EntryList - bulk transfer.
-      // First, detach _cxq.
-      // The following loop is tantamount to: w = swap (&cxq, NULL)
-      for (;;) {
-          assert (w != NULL, "Invariant") ;
-          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
-          if (u == w) break ;
-          w = u ;
-      }
-      TEVENT (Inflated exit - drain cxq into EntryList) ;
-
-      assert (w != NULL              , "invariant") ;
-      assert (_EntryList  == NULL    , "invariant") ;
-
-      // Convert the LIFO SLL anchored by _cxq into a DLL.
-      // The list reorganization step operates in O(LENGTH(w)) time.
-      // It's critical that this step operate quickly as
-      // "Self" still holds the outer-lock, restricting parallelism
-      // and effectively lengthening the critical section.
-      // Invariant: s chases t chases u.
-      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
-      // we have faster access to the tail.
-
-      if (QMode == 1) {
-         // QMode == 1 : drain cxq to EntryList, reversing the order of the
-         // threads as we go.
-         ObjectWaiter * s = NULL ;
-         ObjectWaiter * t = w ;
-         ObjectWaiter * u = NULL ;
-         while (t != NULL) {
-             guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
-             t->TState = ObjectWaiter::TS_ENTER ;
-             u = t->_next ;
-             t->_prev = u ;
-             t->_next = s ;
-             s = t;
-             t = u ;
-         }
-         _EntryList  = s ;
-         assert (s != NULL, "invariant") ;
-      } else {
-         // QMode == 0 or QMode == 2
-         _EntryList = w ;
-         ObjectWaiter * q = NULL ;
-         ObjectWaiter * p ;
-         for (p = w ; p != NULL ; p = p->_next) {
-             guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
-             p->TState = ObjectWaiter::TS_ENTER ;
-             p->_prev = q ;
-             q = p ;
-         }
-      }
-
-      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
-      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
-
-      // See if we can abdicate to a spinner instead of waking a thread.
-      // A primary goal of the implementation is to reduce the
-      // context-switch rate.
-      if (_succ != NULL) continue;
-
-      w = _EntryList  ;
-      if (w != NULL) {
-          guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
-          ExitEpilog (Self, w) ;
-          return ;
-      }
-   }
-}
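// ----------------------------------------------------------------------------
// A minimal standalone sketch of the drain-and-relink idiom used in exit()
// above: detach a CAS-based LIFO push list (the analogue of _cxq) with one
// atomic exchange, then thread _prev pointers through it to form a doubly
// linked list (the analogue of the EntryList).  This is an illustrative
// analogue using std::atomic, not the HotSpot code itself; Node and the
// function names below are made up for the sketch.

#include <atomic>

struct Node {
  Node * _next ;
  Node * _prev ;
};

static std::atomic<Node*> cxq { nullptr } ;

// Push is lock-free: new arrivals CAS themselves onto the head (LIFO).
void push (Node * n) {
  Node * head = cxq.load() ;
  do {
    n->_next = head ;
  } while (!cxq.compare_exchange_weak(head, n)) ;
}

// Drain detaches the whole list at once -- equivalent to swap(&cxq, NULL) --
// and then threads _prev pointers through it, preserving LIFO order, much as
// the QMode == 0/2 path above does.
Node * drain () {
  Node * w = cxq.exchange(nullptr) ;   // detach; pushers may still race with us
  Node * q = nullptr ;
  for (Node * p = w ; p != nullptr ; p = p->_next) {
    p->_prev = q ;
    q = p ;
  }
  return w ;                           // head of the new doubly-linked list
}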
-// complete_exit exits a lock, returning the recursion count
-// complete_exit/reenter operate as a wait without waiting
-// complete_exit requires an inflated monitor
-// The _owner field is not always the Thread addr even with an
-// inflated monitor, e.g. the monitor can be inflated by a non-owning
-// thread due to contention.
-intptr_t ObjectMonitor::complete_exit(TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
-
-   DeferredInitialize();
-
-   if (THREAD != _owner) {
-    if (THREAD->is_lock_owned ((address)_owner)) {
-       assert(_recursions == 0, "internal state error");
-       _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
-       _recursions = 0 ;
-       OwnerIsThread = 1 ;
-    }
-   }
-
-   guarantee(Self == _owner, "complete_exit not owner");
-   intptr_t save = _recursions; // record the old recursion count
-   _recursions = 0;        // set the recursion level to be 0
-   exit (Self) ;           // exit the monitor
-   guarantee (_owner != Self, "invariant");
-   return save;
-}
-
-// reenter() enters a lock and sets recursion count
-// complete_exit/reenter operate as a wait without waiting
-void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
-   Thread * const Self = THREAD;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
-
-   guarantee(_owner != Self, "reenter already owner");
-   enter (THREAD);       // enter the monitor
-   guarantee (_recursions == 0, "reenter recursion");
-   _recursions = recursions;
-   return;
-}
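// ----------------------------------------------------------------------------
// The complete_exit()/reenter() pair above lets a thread fully release a
// recursively held monitor and later restore the saved recursion count.  A
// standalone analogue of that pattern, sketched with std::recursive_mutex;
// illustrative only, not the HotSpot implementation.

#include <mutex>

class RecursiveLock {
  std::recursive_mutex _mu ;
 public:
  void lock ()   { _mu.lock() ; }
  void unlock () { _mu.unlock() ; }

  // Analogue of complete_exit(): drop every recursion level we hold and
  // report how many there were.  The caller tracks its own depth here,
  // because std::recursive_mutex does not expose it.
  int release_all (int depth) {
    for (int i = 0 ; i < depth ; i++) _mu.unlock() ;
    return depth ;
  }

  // Analogue of reenter(): reacquire to the previously saved depth.
  void reacquire (int depth) {
    for (int i = 0 ; i < depth ; i++) _mu.lock() ;
  }
};

// Typical use mirrors the wait() protocol:
//   int save = lk.release_all(depth);  /* block without holding the lock */  lk.reacquire(save);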
-
-// Note: a subset of changes to ObjectMonitor::wait()
-// will need to be replicated in complete_exit above
-void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
-   Thread * const Self = THREAD ;
-   assert(Self->is_Java_thread(), "Must be Java thread!");
-   JavaThread *jt = (JavaThread *)THREAD;
-
-   DeferredInitialize () ;
-
-   // Throw IMSX or IEX.
-   CHECK_OWNER();
-
-   // check for a pending interrupt
-   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
-     // post monitor waited event.  Note that this is past-tense, we are done waiting.
-     if (JvmtiExport::should_post_monitor_waited()) {
-        // Note: 'false' is passed here because the wait did not
-        // time out -- it was cut short by the thread interrupt.
-        JvmtiExport::post_monitor_waited(jt, this, false);
-     }
-     TEVENT (Wait - Throw IEX) ;
-     THROW(vmSymbols::java_lang_InterruptedException());
-     return ;
-   }
-   TEVENT (Wait) ;
-
-   assert (Self->_Stalled == 0, "invariant") ;
-   Self->_Stalled = intptr_t(this) ;
-   jt->set_current_waiting_monitor(this);
-
-   // create a node to be put into the queue
-   // Critically, after we reset() the event but prior to park(), we must check
-   // for a pending interrupt.
-   ObjectWaiter node(Self);
-   node.TState = ObjectWaiter::TS_WAIT ;
-   Self->_ParkEvent->reset() ;
-   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
-
-   // Enter the waiting queue, which is a circular doubly linked list in this case,
-   // but it could be a priority queue or any other data structure.
-   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
-   // by the owner of the monitor *except* in the case where park()
-   // returns because of a timeout or interrupt.  Contention is exceptionally rare
-   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
-
-   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
-   AddWaiter (&node) ;
-   Thread::SpinRelease (&_WaitSetLock) ;
-
-   if ((SyncFlags & 4) == 0) {
-      _Responsible = NULL ;
-   }
-   intptr_t save = _recursions; // record the old recursion count
-   _waiters++;                  // increment the number of waiters
-   _recursions = 0;             // set the recursion level to be 0
-   exit (Self) ;                    // exit the monitor
-   guarantee (_owner != Self, "invariant") ;
-
-   // As soon as the ObjectMonitor's ownership is dropped in the exit()
-   // call above, another thread can enter() the ObjectMonitor, do the
-   // notify(), and exit() the ObjectMonitor. If the other thread's
-   // exit() call chooses this thread as the successor and the unpark()
-   // call happens to occur while this thread is posting a
-   // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
-   // handler using RawMonitors and consuming the unpark().
-   //
-   // To avoid the problem, we re-post the event. This does no harm
-   // even if the original unpark() was not consumed because we are the
-   // chosen successor for this monitor.
-   if (node._notified != 0 && _succ == Self) {
-      node._event->unpark();
-   }
-
-   // The thread is on the WaitSet list - now park() it.
-   // On MP systems it's conceivable that a brief spin before we park
-   // could be profitable.
-   //
-   // TODO-FIXME: change the following logic to a loop of the form
-   //   while (!timeout && !interrupted && _notified == 0) park()
-
-   int ret = OS_OK ;
-   int WasNotified = 0 ;
-   { // State transition wrappers
-     OSThread* osthread = Self->osthread();
-     OSThreadWaitState osts(osthread, true);
-     {
-       ThreadBlockInVM tbivm(jt);
-       // Thread is in thread_blocked state and oop access is unsafe.
-       jt->set_suspend_equivalent();
-
-       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
-           // Intentionally empty
-       } else
-       if (node._notified == 0) {
-         if (millis <= 0) {
-            Self->_ParkEvent->park () ;
-         } else {
-            ret = Self->_ParkEvent->park (millis) ;
-         }
-       }
-
-       // were we externally suspended while we were waiting?
-       if (ExitSuspendEquivalent (jt)) {
-          // TODO-FIXME: add -- if succ == Self then succ = null.
-          jt->java_suspend_self();
-       }
-
-     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
-
-
-     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
-     // from the WaitSet to the EntryList.
-     // See if we need to remove Node from the WaitSet.
-     // We use double-checked locking to avoid grabbing _WaitSetLock
-     // if the thread is not on the wait queue.
-     //
-     // Note that we don't need a fence before the fetch of TState.
-     // In the worst case we'll fetch an old, stale value of TS_WAIT previously
-     // written by this thread. (perhaps the fetch might even be satisfied
-     // by a look-aside into the processor's own store buffer, although given
-     // the length of the code path between the prior ST and this load that's
-     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
-     // then we'll acquire the lock and then re-fetch a fresh TState value.
-     // That is, we fail toward safety.
-
-     if (node.TState == ObjectWaiter::TS_WAIT) {
-         Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
-         if (node.TState == ObjectWaiter::TS_WAIT) {
-            DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
-            assert(node._notified == 0, "invariant");
-            node.TState = ObjectWaiter::TS_RUN ;
-         }
-         Thread::SpinRelease (&_WaitSetLock) ;
-     }
+// Instead of No_Savepoint_Verifier it might be cheaper to
+// use an idiom of the form:
+//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
+//   <code that must not run at safepoint>
+//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
+// Since the tests are extremely cheap we could leave them enabled
+// for normal product builds.
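// ----------------------------------------------------------------------------
// Why that guarantee works: assuming the safepoint counter is bumped once when
// a safepoint begins and once when it ends (so it is odd exactly while a
// safepoint is in progress), the expression is zero only if the counter is
// unchanged AND the starting value was even.  A standalone sketch of the check;
// the names below are illustrative, not the HotSpot symbols.

#include <cassert>

static volatile long safepoint_counter = 0 ;   // ++ at safepoint begin and end

inline bool ran_outside_safepoint (long observed_at_start) {
  long now = safepoint_counter ;
  // (observed ^ now) != 0  => a safepoint began (and possibly ended) meanwhile
  // (observed & 1)  != 0  => we started while a safepoint was already in progress
  return ((observed_at_start ^ now) | (observed_at_start & 1)) == 0 ;
}

void code_that_must_not_run_at_safepoint () {
  long tmp = safepoint_counter ;
  // ... the guarded code ...
  assert (ran_outside_safepoint(tmp)) ;
}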
 
-     // The thread is now either off-list (TS_RUN),
-     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
-     // The Node's TState variable is stable from the perspective of this thread.
-     // No other threads will asynchronously modify TState.
-     guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
-     OrderAccess::loadload() ;
-     if (_succ == Self) _succ = NULL ;
-     WasNotified = node._notified ;
-
-     // Reentry phase -- reacquire the monitor.
-     // re-enter contended monitor after object.wait().
-     // retain OBJECT_WAIT state until re-enter successfully completes
-     // Thread state is thread_in_vm and oop access is again safe,
-     // although the raw address of the object may have changed.
-     // (Don't cache naked oops over safepoints, of course).
-
-     // post monitor waited event. Note that this is past-tense, we are done waiting.
-     if (JvmtiExport::should_post_monitor_waited()) {
-       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
-     }
-     OrderAccess::fence() ;
-
-     assert (Self->_Stalled != 0, "invariant") ;
-     Self->_Stalled = 0 ;
-
-     assert (_owner != Self, "invariant") ;
-     ObjectWaiter::TStates v = node.TState ;
-     if (v == ObjectWaiter::TS_RUN) {
-         enter (Self) ;
-     } else {
-         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
-         ReenterI (Self, &node) ;
-         node.wait_reenter_end(this);
-     }
-
-     // Self has reacquired the lock.
-     // Lifecycle - the node representing Self must not appear on any queues.
-     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
-     // want residual elements associated with this thread left on any lists.
-     guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
-     assert    (_owner == Self, "invariant") ;
-     assert    (_succ != Self , "invariant") ;
-   } // OSThreadWaitState()
-
-   jt->set_current_waiting_monitor(NULL);
-
-   guarantee (_recursions == 0, "invariant") ;
-   _recursions = save;     // restore the old recursion count
-   _waiters--;             // decrement the number of waiters
-
-   // Verify a few postconditions
-   assert (_owner == Self       , "invariant") ;
-   assert (_succ  != Self       , "invariant") ;
-   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
-
-   if (SyncFlags & 32) {
-      OrderAccess::fence() ;
-   }
-
-   // check if the notification happened
-   if (!WasNotified) {
-     // no, it could be timeout or Thread.interrupt() or both
-     // check for interrupt event, otherwise it is timeout
-     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
-       TEVENT (Wait - throw IEX from epilog) ;
-       THROW(vmSymbols::java_lang_InterruptedException());
-     }
-   }
-
-   // NOTE: A spurious wake up will be considered a timeout.
-   // Monitor notify has precedence over thread interrupt.
-}
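// ----------------------------------------------------------------------------
// The "check TState, take _WaitSetLock, re-check" sequence in wait() above is
// classic double-checked locking: the cheap first test skips the lock in the
// common case, and the second test, made under the lock, is authoritative.
// A standalone sketch of the shape using the C++ standard library; the names
// are illustrative only.

#include <atomic>
#include <mutex>

enum State { WAITING, RUNNABLE } ;

struct Waiter {
  std::atomic<State> state { WAITING } ;
};

static std::mutex wait_set_lock ;

void unlink_if_still_waiting (Waiter * node) {
  if (node->state.load() != WAITING) return ;     // cheap, racy first check
  std::lock_guard<std::mutex> g (wait_set_lock) ;
  if (node->state.load() == WAITING) {            // authoritative re-check
    // ... unlink node from the wait set here ...
    node->state.store(RUNNABLE) ;
  }
}
// A stale WAITING read in the first check is harmless: we simply take the lock
// and re-check, failing toward safety exactly as the comment above describes.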
-
-
-// Consider:
-// If the lock is cool (cxq == null && succ == null) and we're on an MP system
-// then instead of transferring a thread from the WaitSet to the EntryList
-// we might just dequeue a thread from the WaitSet and directly unpark() it.
-
-void ObjectMonitor::notify(TRAPS) {
-  CHECK_OWNER();
-  if (_WaitSet == NULL) {
-     TEVENT (Empty-Notify) ;
-     return ;
-  }
-  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
-
-  int Policy = Knob_MoveNotifyee ;
-
-  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
-  ObjectWaiter * iterator = DequeueWaiter() ;
-  if (iterator != NULL) {
-     TEVENT (Notify1 - Transfer) ;
-     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
-     guarantee (iterator->_notified == 0, "invariant") ;
-     // Disposition - what might we do with iterator ?
-     // a.  add it directly to the EntryList - either tail or head.
-     // b.  push it onto the front of the _cxq.
-     // For now we use (a).
-     if (Policy != 4) {
-        iterator->TState = ObjectWaiter::TS_ENTER ;
-     }
-     iterator->_notified = 1 ;
-
-     ObjectWaiter * List = _EntryList ;
-     if (List != NULL) {
-        assert (List->_prev == NULL, "invariant") ;
-        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
-        assert (List != iterator, "invariant") ;
-     }
-
-     if (Policy == 0) {       // prepend to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL ;
-             _EntryList = iterator ;
-         } else {
-             List->_prev = iterator ;
-             iterator->_next = List ;
-             iterator->_prev = NULL ;
-             _EntryList = iterator ;
-        }
-     } else
-     if (Policy == 1) {      // append to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL ;
-             _EntryList = iterator ;
-         } else {
-            // CONSIDER:  finding the tail currently requires a linear-time walk of
-            // the EntryList.  We can make tail access constant-time by converting to
-            // a CDLL instead of using our current DLL.
-            ObjectWaiter * Tail ;
-            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
-            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
-            Tail->_next = iterator ;
-            iterator->_prev = Tail ;
-            iterator->_next = NULL ;
-        }
-     } else
-     if (Policy == 2) {      // prepend to cxq
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL ;
-             _EntryList = iterator ;
-         } else {
-            iterator->TState = ObjectWaiter::TS_CXQ ;
-            for (;;) {
-                ObjectWaiter * Front = _cxq ;
-                iterator->_next = Front ;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
-                    break ;
-                }
-            }
-         }
-     } else
-     if (Policy == 3) {      // append to cxq
-        iterator->TState = ObjectWaiter::TS_CXQ ;
-        for (;;) {
-            ObjectWaiter * Tail ;
-            Tail = _cxq ;
-            if (Tail == NULL) {
-                iterator->_next = NULL ;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
-                   break ;
-                }
-            } else {
-                while (Tail->_next != NULL) Tail = Tail->_next ;
-                Tail->_next = iterator ;
-                iterator->_prev = Tail ;
-                iterator->_next = NULL ;
-                break ;
-            }
-        }
-     } else {
-        ParkEvent * ev = iterator->_event ;
-        iterator->TState = ObjectWaiter::TS_RUN ;
-        OrderAccess::fence() ;
-        ev->unpark() ;
-     }
-
-     if (Policy < 4) {
-       iterator->wait_reenter_begin(this);
-     }
-
-     // _WaitSetLock protects the wait queue, not the EntryList.  We could
-     // move the add-to-EntryList operation, above, outside the critical section
-     // protected by _WaitSetLock.  In practice that's not useful.  With the
-     // exception of  wait() timeouts and interrupts the monitor owner
-     // is the only thread that grabs _WaitSetLock.  There's almost no contention
-     // on _WaitSetLock so it's not profitable to reduce the length of the
-     // critical section.
-  }
-
-  Thread::SpinRelease (&_WaitSetLock) ;
-
-  if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
-     ObjectSynchronizer::_sync_Notifications->inc() ;
-  }
-}
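// ----------------------------------------------------------------------------
// The CONSIDER notes above point out that appending requires a linear tail
// walk with a plain DLL.  With a circular DLL (CDLL) the tail is simply
// head->_prev, so both prepend and append become O(1).  A standalone sketch;
// illustrative only, not the HotSpot EntryList.

struct CNode {
  CNode * _next ;
  CNode * _prev ;
};

// head points at the first element; head->_prev is the tail.
void cdll_append (CNode *& head, CNode * n) {
  if (head == nullptr) {
    n->_next = n->_prev = n ;
    head = n ;
    return ;
  }
  CNode * tail = head->_prev ;   // constant-time tail access
  tail->_next = n ;
  n->_prev    = tail ;
  n->_next    = head ;
  head->_prev = n ;
}

void cdll_prepend (CNode *& head, CNode * n) {
  cdll_append(head, n) ;         // append, then rotate the head back one slot
  head = n ;
}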
-
-
-void ObjectMonitor::notifyAll(TRAPS) {
-  CHECK_OWNER();
-  ObjectWaiter* iterator;
-  if (_WaitSet == NULL) {
-      TEVENT (Empty-NotifyAll) ;
-      return ;
-  }
-  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
-
-  int Policy = Knob_MoveNotifyee ;
-  int Tally = 0 ;
-  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
-
-  for (;;) {
-     iterator = DequeueWaiter () ;
-     if (iterator == NULL) break ;
-     TEVENT (NotifyAll - Transfer1) ;
-     ++Tally ;
-
-     // Disposition - what might we do with iterator ?
-     // a.  add it directly to the EntryList - either tail or head.
-     // b.  push it onto the front of the _cxq.
-     // For now we use (a).
-     //
-     // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset
-     // to the EntryList.  This could be done more efficiently with a single bulk transfer,
-     // but in practice it's not time-critical.  Beware, too, that in prepend-mode we invert the
-     // order of the waiters.  Let's say that the waitset is "ABCD" and the EntryList is "XYZ".
-     // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will
-     // be "DCBAXYZ".
-
-     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
-     guarantee (iterator->_notified == 0, "invariant") ;
-     iterator->_notified = 1 ;
-     if (Policy != 4) {
-        iterator->TState = ObjectWaiter::TS_ENTER ;
-     }
-
-     ObjectWaiter * List = _EntryList ;
-     if (List != NULL) {
-        assert (List->_prev == NULL, "invariant") ;
-        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
-        assert (List != iterator, "invariant") ;
-     }
-
-     if (Policy == 0) {       // prepend to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL ;
-             _EntryList = iterator ;
-         } else {
-             List->_prev = iterator ;
-             iterator->_next = List ;
-             iterator->_prev = NULL ;
-             _EntryList = iterator ;
-        }
-     } else
-     if (Policy == 1) {      // append to EntryList
-         if (List == NULL) {
-             iterator->_next = iterator->_prev = NULL ;
-             _EntryList = iterator ;
-         } else {
-            // CONSIDER:  finding the tail currently requires a linear-time walk of
-            // the EntryList.  We can make tail access constant-time by converting to
-            // a CDLL instead of using our current DLL.
-            ObjectWaiter * Tail ;
-            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
-            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
-            Tail->_next = iterator ;
-            iterator->_prev = Tail ;
-            iterator->_next = NULL ;
-        }
-     } else
-     if (Policy == 2) {      // prepend to cxq
-         iterator->TState = ObjectWaiter::TS_CXQ ;
-         for (;;) {
-             ObjectWaiter * Front = _cxq ;
-             iterator->_next = Front ;
-             if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
-                 break ;
-             }
-         }
-     } else
-     if (Policy == 3) {      // append to cxq
-        iterator->TState = ObjectWaiter::TS_CXQ ;
-        for (;;) {
-            ObjectWaiter * Tail ;
-            Tail = _cxq ;
-            if (Tail == NULL) {
-                iterator->_next = NULL ;
-                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
-                   break ;
-                }
-            } else {
-                while (Tail->_next != NULL) Tail = Tail->_next ;
-                Tail->_next = iterator ;
-                iterator->_prev = Tail ;
-                iterator->_next = NULL ;
-                break ;
-            }
-        }
-     } else {
-        ParkEvent * ev = iterator->_event ;
-        iterator->TState = ObjectWaiter::TS_RUN ;
-        OrderAccess::fence() ;
-        ev->unpark() ;
-     }
-
-     if (Policy < 4) {
-       iterator->wait_reenter_begin(this);
-     }
-
-     // _WaitSetLock protects the wait queue, not the EntryList.  We could
-     // move the add-to-EntryList operation, above, outside the critical section
-     // protected by _WaitSetLock.  In practice that's not useful.  With the
-     // exception of  wait() timeouts and interrupts the monitor owner
-     // is the only thread that grabs _WaitSetLock.  There's almost no contention
-     // on _WaitSetLock so it's not profitable to reduce the length of the
-     // critical section.
-  }
-
-  Thread::SpinRelease (&_WaitSetLock) ;
-
-  if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
-     ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
-  }
+void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
+  assert(THREAD == JavaThread::current(), "must be current Java thread");
+  No_Safepoint_Verifier nsv ;
+  ReleaseJavaMonitorsClosure rjmc(THREAD);
+  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
+  ObjectSynchronizer::monitors_iterate(&rjmc);
+  Thread::muxRelease(&ListLock);
+  THREAD->clear_pending_exception();
 }
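// ----------------------------------------------------------------------------
// The notifyAll() comment above notes that prepend-mode inverts waiter order:
// with WaitSet "ABCD" and EntryList "XYZ", the result is "DCBAXYZ".  A tiny
// standalone demonstration of that inversion; illustrative only.

#include <deque>
#include <iostream>
#include <string>

int main () {
  std::string wait_set = "ABCD" ;     // dequeued front-to-back: A, B, C, D
  std::deque<char> entry { 'X', 'Y', 'Z' } ;

  for (char waiter : wait_set) {
    entry.push_front(waiter) ;        // prepend, as the Policy == 0 path does
  }

  for (char c : entry) std::cout << c ;
  std::cout << std::endl ;            // prints DCBAXYZ
  return 0 ;
}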
 
-// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
-// TODO-FIXME: remove check_slow() -- it's likely dead.
-
-void ObjectMonitor::check_slow(TRAPS) {
-  TEVENT (check_slow - throw IMSX) ;
-  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
-  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
-}
-
-
-// -------------------------------------------------------------------------
-// The raw monitor subsystem is entirely distinct from normal
-// java-synchronization or jni-synchronization.  raw monitors are not
-// associated with objects.  They can be implemented in any manner
-// that makes sense.  The original implementors decided to piggy-back
-// the raw-monitor implementation on the existing Java objectMonitor mechanism.
-// This flaw needs to be fixed.  We should reimplement raw monitors as a sui generis facility.
-// Specifically, we should not implement raw monitors via java monitors.
-// Time permitting, we should disentangle and deconvolve the two implementations
-// and move the resulting raw monitor implementation over to the JVMTI directories.
-// Ideally, the raw monitor implementation would be built on top of
-// park-unpark and nothing else.
-//
-// raw monitors are used mainly by JVMTI
-// The raw monitor implementation borrows the ObjectMonitor structure,
-// but the operators are degenerate and extremely simple.
-//
-// Mixed use of a single objectMonitor instance -- as both a raw monitor
-// and a normal java monitor -- is not permissible.
-//
-// Note that we use the single RawMonitor_lock to protect queue operations for
-// _all_ raw monitors.  This is a scalability impediment, but since raw monitor usage
-// is deprecated and rare, this is not of concern.  The RawMonitor_lock can not
-// be held indefinitely.  The critical sections must be short and bounded.
-//
-// -------------------------------------------------------------------------
-
-int ObjectMonitor::SimpleEnter (Thread * Self) {
-  for (;;) {
-    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
-       return OS_OK ;
-    }
-
-    ObjectWaiter Node (Self) ;
-    Self->_ParkEvent->reset() ;     // strictly optional
-    Node.TState = ObjectWaiter::TS_ENTER ;
-
-    RawMonitor_lock->lock_without_safepoint_check() ;
-    Node._next  = _EntryList ;
-    _EntryList  = &Node ;
-    OrderAccess::fence() ;
-    if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
-        _EntryList = Node._next ;
-        RawMonitor_lock->unlock() ;
-        return OS_OK ;
-    }
-    RawMonitor_lock->unlock() ;
-    while (Node.TState == ObjectWaiter::TS_ENTER) {
-       Self->_ParkEvent->park() ;
-    }
-  }
-}
-
-int ObjectMonitor::SimpleExit (Thread * Self) {
-  guarantee (_owner == Self, "invariant") ;
-  OrderAccess::release_store_ptr (&_owner, NULL) ;
-  OrderAccess::fence() ;
-  if (_EntryList == NULL) return OS_OK ;
-  ObjectWaiter * w ;
-
-  RawMonitor_lock->lock_without_safepoint_check() ;
-  w = _EntryList ;
-  if (w != NULL) {
-      _EntryList = w->_next ;
-  }
-  RawMonitor_lock->unlock() ;
-  if (w != NULL) {
-      guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
-      ParkEvent * ev = w->_event ;
-      w->TState = ObjectWaiter::TS_RUN ;
-      OrderAccess::fence() ;
-      ev->unpark() ;
-  }
-  return OS_OK ;
-}
-
-int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
-  guarantee (_owner == Self  , "invariant") ;
-  guarantee (_recursions == 0, "invariant") ;
-
-  ObjectWaiter Node (Self) ;
-  Node._notified = 0 ;
-  Node.TState    = ObjectWaiter::TS_WAIT ;
-
-  RawMonitor_lock->lock_without_safepoint_check() ;
-  Node._next     = _WaitSet ;
-  _WaitSet       = &Node ;
-  RawMonitor_lock->unlock() ;
-
-  SimpleExit (Self) ;
-  guarantee (_owner != Self, "invariant") ;
-
-  int ret = OS_OK ;
-  if (millis <= 0) {
-    Self->_ParkEvent->park();
-  } else {
-    ret = Self->_ParkEvent->park(millis);
-  }
-
-  // If thread still resides on the waitset then unlink it.
-  // Double-checked locking -- the usage is safe in this context
-  // as TState is volatile and the lock-unlock operators are
-  // serializing (barrier-equivalent).
-
-  if (Node.TState == ObjectWaiter::TS_WAIT) {
-    RawMonitor_lock->lock_without_safepoint_check() ;
-    if (Node.TState == ObjectWaiter::TS_WAIT) {
-      // Simple O(n) unlink, but performance isn't critical here.
-      ObjectWaiter * p ;
-      ObjectWaiter * q = NULL ;
-      for (p = _WaitSet ; p != &Node; p = p->_next) {
-         q = p ;
-      }
-      guarantee (p == &Node, "invariant") ;
-      if (q == NULL) {
-        guarantee (p == _WaitSet, "invariant") ;
-        _WaitSet = p->_next ;
-      } else {
-        guarantee (p == q->_next, "invariant") ;
-        q->_next = p->_next ;
-      }
-      Node.TState = ObjectWaiter::TS_RUN ;
-    }
-    RawMonitor_lock->unlock() ;
-  }
-
-  guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
-  SimpleEnter (Self) ;
-
-  guarantee (_owner == Self, "invariant") ;
-  guarantee (_recursions == 0, "invariant") ;
-  return ret ;
-}
-
-int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
-  guarantee (_owner == Self, "invariant") ;
-  if (_WaitSet == NULL) return OS_OK ;
-
-  // We have two options:
-  // A. Transfer the threads from the WaitSet to the EntryList
-  // B. Remove the thread from the WaitSet and unpark() it.
-  //
-  // We use (B), which is crude and results in lots of futile
-  // context switching.  In particular (B) induces lots of contention.
-
-  ParkEvent * ev = NULL ;       // consider using a small auto array ...
-  RawMonitor_lock->lock_without_safepoint_check() ;
-  for (;;) {
-      ObjectWaiter * w = _WaitSet ;
-      if (w == NULL) break ;
-      _WaitSet = w->_next ;
-      if (ev != NULL) { ev->unpark(); ev = NULL; }
-      ev = w->_event ;
-      OrderAccess::loadstore() ;
-      w->TState = ObjectWaiter::TS_RUN ;
-      OrderAccess::storeload();
-      if (!All) break ;
-  }
-  RawMonitor_lock->unlock() ;
-  if (ev != NULL) ev->unpark();
-  return OS_OK ;
-}
-
-// Any JavaThread will enter here with state _thread_blocked
-int ObjectMonitor::raw_enter(TRAPS) {
-  TEVENT (raw_enter) ;
-  void * Contended ;
-
-  // don't enter raw monitor if thread is being externally suspended, it will
-  // surprise the suspender if a "suspended" thread can still enter monitor
-  JavaThread * jt = (JavaThread *)THREAD;
-  if (THREAD->is_Java_thread()) {
-    jt->SR_lock()->lock_without_safepoint_check();
-    while (jt->is_external_suspend()) {
-      jt->SR_lock()->unlock();
-      jt->java_suspend_self();
-      jt->SR_lock()->lock_without_safepoint_check();
-    }
-    // guarded by SR_lock to avoid racing with new external suspend requests.
-    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
-    jt->SR_lock()->unlock();
-  } else {
-    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
-  }
-
-  if (Contended == THREAD) {
-     _recursions ++ ;
-     return OM_OK ;
-  }
-
-  if (Contended == NULL) {
-     guarantee (_owner == THREAD, "invariant") ;
-     guarantee (_recursions == 0, "invariant") ;
-     return OM_OK ;
-  }
-
-  THREAD->set_current_pending_monitor(this);
-
-  if (!THREAD->is_Java_thread()) {
-     // No non-Java thread other than the VM thread would acquire
-     // a raw monitor.
-     assert(THREAD->is_VM_thread(), "must be VM thread");
-     SimpleEnter (THREAD) ;
-   } else {
-     guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
-     for (;;) {
-       jt->set_suspend_equivalent();
-       // cleared by handle_special_suspend_equivalent_condition() or
-       // java_suspend_self()
-       SimpleEnter (THREAD) ;
-
-       // were we externally suspended while we were waiting?
-       if (!jt->handle_special_suspend_equivalent_condition()) break ;
-
-       // This thread was externally suspended
-       //
-       // This logic isn't needed for JVMTI raw monitors,
-       // but doesn't hurt just in case the suspend rules change. This
-       // logic is needed for the ObjectMonitor.wait() reentry phase.
-       // We have reentered the contended monitor, but while we were
-       // waiting another thread suspended us. We don't want to reenter
-       // the monitor while suspended because that would surprise the
-       // thread that suspended us.
-       //
-       // Drop the lock -
-       SimpleExit (THREAD) ;
-
-       jt->java_suspend_self();
-     }
-
-     assert(_owner == THREAD, "Fatal error with monitor owner!");
-     assert(_recursions == 0, "Fatal error with monitor recursions!");
-  }
-
-  THREAD->set_current_pending_monitor(NULL);
-  guarantee (_recursions == 0, "invariant") ;
-  return OM_OK;
-}
-
-// Used mainly for JVMTI raw monitor implementation
-// Also used for ObjectMonitor::wait().
-int ObjectMonitor::raw_exit(TRAPS) {
-  TEVENT (raw_exit) ;
-  if (THREAD != _owner) {
-    return OM_ILLEGAL_MONITOR_STATE;
-  }
-  if (_recursions > 0) {
-    --_recursions ;
-    return OM_OK ;
-  }
-
-  void * List = _EntryList ;
-  SimpleExit (THREAD) ;
-
-  return OM_OK;
-}
-
-// Used for JVMTI raw monitor implementation.
-// All JavaThreads will enter here with state _thread_blocked
-
-int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
-  TEVENT (raw_wait) ;
-  if (THREAD != _owner) {
-    return OM_ILLEGAL_MONITOR_STATE;
-  }
-
-  // To avoid spurious wakeups we reset the parkevent -- this is strictly optional.
-  // The caller must be able to tolerate spurious returns from raw_wait().
-  THREAD->_ParkEvent->reset() ;
-  OrderAccess::fence() ;
-
-  // check interrupt event
-  if (interruptible && Thread::is_interrupted(THREAD, true)) {
-    return OM_INTERRUPTED;
-  }
-
-  intptr_t save = _recursions ;
-  _recursions = 0 ;
-  _waiters ++ ;
-  if (THREAD->is_Java_thread()) {
-    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
-    ((JavaThread *)THREAD)->set_suspend_equivalent();
-  }
-  int rv = SimpleWait (THREAD, millis) ;
-  _recursions = save ;
-  _waiters -- ;
-
-  guarantee (THREAD == _owner, "invariant") ;
-  if (THREAD->is_Java_thread()) {
-     JavaThread * jSelf = (JavaThread *) THREAD ;
-     for (;;) {
-        if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
-        SimpleExit (THREAD) ;
-        jSelf->java_suspend_self();
-        SimpleEnter (THREAD) ;
-        jSelf->set_suspend_equivalent() ;
-     }
-  }
-  guarantee (THREAD == _owner, "invariant") ;
-
-  if (interruptible && Thread::is_interrupted(THREAD, true)) {
-    return OM_INTERRUPTED;
-  }
-  return OM_OK ;
-}
-
-int ObjectMonitor::raw_notify(TRAPS) {
-  TEVENT (raw_notify) ;
-  if (THREAD != _owner) {
-    return OM_ILLEGAL_MONITOR_STATE;
-  }
-  SimpleNotify (THREAD, false) ;
-  return OM_OK;
-}
-
-int ObjectMonitor::raw_notifyAll(TRAPS) {
-  TEVENT (raw_notifyAll) ;
-  if (THREAD != _owner) {
-    return OM_ILLEGAL_MONITOR_STATE;
-  }
-  SimpleNotify (THREAD, true) ;
-  return OM_OK;
-}
-
-#ifndef PRODUCT
-void ObjectMonitor::verify() {
-}
-
-void ObjectMonitor::print() {
-}
-#endif
-
 //------------------------------------------------------------------------------
 // Non-product code
 
--- a/src/share/vm/runtime/synchronizer.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/synchronizer.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -22,53 +22,6 @@
  *
  */
 
-class BasicLock VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
- private:
-  volatile markOop _displaced_header;
- public:
-  markOop      displaced_header() const               { return _displaced_header; }
-  void         set_displaced_header(markOop header)   { _displaced_header = header; }
-
-  void print_on(outputStream* st) const;
-
-  // move a basic lock (used during deoptimization)
-  void move_to(oop obj, BasicLock* dest);
-
-  static int displaced_header_offset_in_bytes()       { return offset_of(BasicLock, _displaced_header); }
-};
-
-// A BasicObjectLock associates a specific Java object with a BasicLock.
-// It is currently embedded in an interpreter frame.
-
-// Because some machines have alignment restrictions on the control stack,
-// the actual space allocated by the interpreter may include padding words
-// after the end of the BasicObjectLock.  Also, in order to guarantee
-// alignment of the embedded BasicLock objects on such machines, we
-// put the embedded BasicLock at the beginning of the struct.
-
-class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
- private:
-  BasicLock _lock;                                    // the lock, must be double word aligned
-  oop       _obj;                                     // the object that holds the lock
-
- public:
-  // Manipulation
-  oop      obj() const                                { return _obj;  }
-  void set_obj(oop obj)                               { _obj = obj; }
-  BasicLock* lock()                                   { return &_lock; }
-
-  // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
-  //       in interpreter activation frames since it includes machine-specific padding.
-  static int size()                                   { return sizeof(BasicObjectLock)/wordSize; }
-
-  // GC support
-  void oops_do(OopClosure* f) { f->do_oop(&_obj); }
-
-  static int obj_offset_in_bytes()                    { return offset_of(BasicObjectLock, _obj);  }
-  static int lock_offset_in_bytes()                   { return offset_of(BasicObjectLock, _lock); }
-};
 
 class ObjectMonitor;
 
@@ -163,6 +116,8 @@
   static void verify() PRODUCT_RETURN;
   static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
 
+  static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
+
  private:
   enum { _BLOCKSIZE = 128 };
   static ObjectMonitor* gBlockList;
@@ -170,30 +125,6 @@
   static ObjectMonitor * volatile gOmInUseList; // for moribund thread, so monitors they inflated still get scanned
   static int gOmInUseCount;
 
- public:
-  static void Initialize () ;
-  static PerfCounter * _sync_ContendedLockAttempts ;
-  static PerfCounter * _sync_FutileWakeups ;
-  static PerfCounter * _sync_Parks ;
-  static PerfCounter * _sync_EmptyNotifications ;
-  static PerfCounter * _sync_Notifications ;
-  static PerfCounter * _sync_SlowEnter ;
-  static PerfCounter * _sync_SlowExit ;
-  static PerfCounter * _sync_SlowNotify ;
-  static PerfCounter * _sync_SlowNotifyAll ;
-  static PerfCounter * _sync_FailedSpins ;
-  static PerfCounter * _sync_SuccessfulSpins ;
-  static PerfCounter * _sync_PrivateA ;
-  static PerfCounter * _sync_PrivateB ;
-  static PerfCounter * _sync_MonInCirculation ;
-  static PerfCounter * _sync_MonScavenged ;
-  static PerfCounter * _sync_Inflations ;
-  static PerfCounter * _sync_Deflations ;
-  static PerfLongVariable * _sync_MonExtant ;
-
- public:
-  static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
-
 };
 
 // ObjectLocker enforces balanced locking and can never throw an
--- a/src/share/vm/runtime/thread.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/thread.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -2921,6 +2921,9 @@
   // So that JDK version can be used as a discriminator when parsing arguments
   JDK_Version_init();
 
+  // Update/Initialize System properties after JDK version number is known
+  Arguments::init_version_specific_system_properties();
+
   // Parse arguments
   jint parse_result = Arguments::parse(args);
   if (parse_result != JNI_OK) return parse_result;
@@ -2992,8 +2995,8 @@
   // crash Linux VM, see notes in os_linux.cpp.
   main_thread->create_stack_guard_pages();
 
-  // Initialize Java-Leve synchronization subsystem
-  ObjectSynchronizer::Initialize() ;
+  // Initialize Java-Level synchronization subsystem
+  ObjectMonitor::Initialize() ;
 
   // Initialize global modules
   jint status = init_globals();
@@ -3962,215 +3965,272 @@
   }
 }
 
-
-// Lifecycle management for TSM ParkEvents.
-// ParkEvents are type-stable (TSM).
-// In our particular implementation they happen to be immortal.
+// Internal SpinLock and Mutex
+// Based on ParkEvent
+
+// Ad-hoc mutual exclusion primitives: SpinLock and Mux
 //
-// We manage concurrency on the FreeList with a CAS-based
-// detach-modify-reattach idiom that avoids the ABA problems
-// that would otherwise be present in a simple CAS-based
-// push-pop implementation.   (push-one and pop-all)
+// We employ SpinLocks only for low-contention, fixed-length,
+// short-duration critical sections where we're concerned
+// about native mutex_t or HotSpot Mutex:: latency.
+// The mux construct provides a spin-then-block mutual exclusion
+// mechanism.
+//
+// Testing has shown that contention on the ListLock guarding gFreeList
+// is common.  If we implement ListLock as a simple SpinLock it's common
+// for the JVM to devolve to yielding with little progress.  This is true
+// despite the fact that the critical sections protected by ListLock are
+// extremely short.
 //
-// Caveat: Allocate() and Release() may be called from threads
-// other than the thread associated with the Event!
-// If we need to call Allocate() when running as the thread in
-// question then look for the PD calls to initialize native TLS.
-// Native TLS (Win32/Linux/Solaris) can only be initialized or
-// accessed by the associated thread.
-// See also pd_initialize().
-//
-// Note that we could defer associating a ParkEvent with a thread
-// until the 1st time the thread calls park().  unpark() calls to
-// an unprovisioned thread would be ignored.  The first park() call
-// for a thread would allocate and associate a ParkEvent and return
-// immediately.
-
-volatile int ParkEvent::ListLock = 0 ;
-ParkEvent * volatile ParkEvent::FreeList = NULL ;
-
-ParkEvent * ParkEvent::Allocate (Thread * t) {
-  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
-  ParkEvent * ev ;
-
-  // Start by trying to recycle an existing but unassociated
-  // ParkEvent from the global free list.
+// TODO-FIXME: ListLock should be of type SpinLock.
+// We should make this a 1st-class type, integrated into the lock
+// hierarchy as leaf-locks.  Critically, the SpinLock structure
+// should have sufficient padding to avoid false-sharing and excessive
+// cache-coherency traffic.
+
+
+typedef volatile int SpinLockT ;
+
+void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
+  if (Atomic::cmpxchg (1, adr, 0) == 0) {
+     return ;   // normal fast-path return
+  }
+
+  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
+  TEVENT (SpinAcquire - ctx) ;
+  int ctr = 0 ;
+  int Yields = 0 ;
   for (;;) {
-    ev = FreeList ;
-    if (ev == NULL) break ;
-    // 1: Detach - sequester or privatize the list
-    // Tantamount to ev = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
-       continue ;
-    }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    ParkEvent * List = ev->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
+     while (*adr != 0) {
+        ++ctr ;
+        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
+           if (Yields > 5) {
+             // Consider using a simple NakedSleep() instead.
+             // Then SpinAcquire could be called by non-JVM threads
+             Thread::current()->_ParkEvent->park(1) ;
+           } else {
+             os::NakedYield() ;
+             ++Yields ;
+           }
+        } else {
+           SpinPause() ;
         }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        ParkEvent * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
-  }
-
-  if (ev != NULL) {
-    guarantee (ev->AssociatedWith == NULL, "invariant") ;
-  } else {
-    // Do this the hard way -- materialize a new ParkEvent.
-    // In rare cases an allocating thread might detach a long list --
-    // installing null into FreeList -- and then stall or be obstructed.
-    // A 2nd thread calling Allocate() would see FreeList == null.
-    // The list held privately by the 1st thread is unavailable to the 2nd thread.
-    // In that case the 2nd thread would have to materialize a new ParkEvent,
-    // even though free ParkEvents existed in the system.  In this case we end up
-    // with more ParkEvents in circulation than we need, but the race is
-    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
-    // is equal to the maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the freelist
-    // can be transiently inaccessible.  At worst we may end up with the
-    // # of ParkEvents in circulation slightly above the ideal.
-    // Note that if we didn't have the TSM/immortal constraint, then
-    // when reattaching, above, we could trim the list.
-    ev = new ParkEvent () ;
-    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
-  }
-  ev->reset() ;                     // courtesy to caller
-  ev->AssociatedWith = t ;          // Associate ev with t
-  ev->FreeNext       = NULL ;
-  return ev ;
-}
-
-void ParkEvent::Release (ParkEvent * ev) {
-  if (ev == NULL) return ;
-  guarantee (ev->FreeNext == NULL      , "invariant") ;
-  ev->AssociatedWith = NULL ;
-  for (;;) {
-    // Push ev onto FreeList
-    // The mechanism is "half" lock-free.
-    ParkEvent * List = FreeList ;
-    ev->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+     }
+     if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
   }
 }
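// ----------------------------------------------------------------------------
// Sketch of what the TODO-FIXME above asks for: a 1st-class spin lock type
// whose storage is padded to a cache line so unrelated hot data cannot share
// the line.  Standalone and illustrative only; the 64-byte line size is an
// assumption, and this uses std::atomic rather than HotSpot's Atomic:: wrappers.

#include <atomic>

struct alignas(64) PaddedSpinLock {
  std::atomic<int> _lock { 0 } ;
  char _pad [64 - sizeof(std::atomic<int>)] ;   // keep neighbours off this line

  void acquire () {
    // test-and-test-and-set: spin on a plain load, CAS only when it looks free
    for (;;) {
      int expected = 0 ;
      if (_lock.load(std::memory_order_relaxed) == 0 &&
          _lock.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
        return ;
      }
    }
  }

  void release () {
    _lock.store(0, std::memory_order_release) ;
  }
};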
 
-// Override operator new and delete so we can ensure that the
-// least significant byte of ParkEvent addresses is 0.
-// Beware that excessive address alignment is undesirable
-// as it can result in D$ index usage imbalance as
-// well as bank access imbalance on Niagara-like platforms,
-// although Niagara's hash function should help.
-
-void * ParkEvent::operator new (size_t sz) {
-  return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
-}
-
-void ParkEvent::operator delete (void * a) {
-  // ParkEvents are type-stable and immortal ...
-  ShouldNotReachHere();
+void Thread::SpinRelease (volatile int * adr) {
+  assert (*adr != 0, "invariant") ;
+  OrderAccess::fence() ;      // guarantee at least release consistency.
+  // Roach-motel semantics.
+  // It's safe if subsequent LDs and STs float "up" into the critical section,
+  // but prior LDs and STs within the critical section can't be allowed
+  // to reorder or float past the ST that releases the lock.
+  *adr = 0 ;
 }
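// ----------------------------------------------------------------------------
// The "roach-motel" rule above maps directly onto a release store: loads and
// stores from inside the critical section must not sink below the unlocking
// store, while later accesses are free to hoist into it.  A standalone sketch
// with std::atomic; illustrative only, not the HotSpot SpinRelease code.

#include <atomic>

static std::atomic<int> lock_word { 0 } ;
static int protected_data = 0 ;

void spin_release_sketch () {
  protected_data++ ;                                 // inside the critical section
  // Release ordering: the increment above cannot be reordered past this store.
  lock_word.store(0, std::memory_order_release) ;
  // Accesses placed here may still be observed "inside" the section -- that
  // direction of movement is the harmless one the comment above allows.
}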
 
-
-// 6399321 As a temporary measure we copied & modified the ParkEvent::
-// allocate() and release() code for use by Parkers.  The Parker:: forms
-// will eventually be removed as we consolidate and shift over to ParkEvents
-// for both builtin synchronization and JSR166 operations.
-
-volatile int Parker::ListLock = 0 ;
-Parker * volatile Parker::FreeList = NULL ;
-
-Parker * Parker::Allocate (JavaThread * t) {
-  guarantee (t != NULL, "invariant") ;
-  Parker * p ;
-
-  // Start by trying to recycle an existing but unassociated
-  // Parker from the global free list.
+// muxAcquire and muxRelease:
+//
+// *  muxAcquire and muxRelease support a single-word lock-word construct.
+//    The LSB of the word is set IFF the lock is held.
+//    The remainder of the word points to the head of a singly-linked list
+//    of threads blocked on the lock.
+//
+// *  The current implementation of muxAcquire-muxRelease uses its own
+//    dedicated Thread._MuxEvent instance.  If we're interested in
+//    minimizing the peak number of extant ParkEvent instances then
+//    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
+//    as certain invariants were satisfied.  Specifically, care would need
+//    to be taken with regards to consuming unpark() "permits".
+//    A safe rule of thumb is that a thread would never call muxAcquire()
+//    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
+//    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
+//    consume an unpark() permit intended for monitorenter, for instance.
+//    One way around this would be to widen the restricted-range semaphore
+//    implemented in park().  Another alternative would be to provide
+//    multiple instances of the PlatformEvent() for each thread.  One
+//    instance would be dedicated to muxAcquire-muxRelease, for instance.
+//
+// *  Usage:
+//    -- Only as leaf locks
+//    -- for short-term locking only as muxAcquire does not perform
+//       thread state transitions.
+//
+// Alternatives:
+// *  We could implement muxAcquire and muxRelease with MCS or CLH locks
+//    but with parking or spin-then-park instead of pure spinning.
+// *  Use Taura-Oyama-Yonenzawa locks.
+// *  It's possible to construct a 1-0 lock if we encode the lockword as
+//    (List,LockByte).  Acquire will CAS the full lockword while Release
+//    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
+//    acquiring threads use timers (ParkTimed) to detect and recover from
+//    the stranding window.  Thread/Node structures must be aligned on 256-byte
+//    boundaries by using placement-new.
+// *  Augment MCS with advisory back-link fields maintained with CAS().
+//    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
+//    The validity of the backlinks must be ratified before we trust the value.
+//    If the backlinks are invalid the exiting thread must back-track through the
+//    forward links, which are always trustworthy.
+// *  Add a successor indication.  The LockWord is currently encoded as
+//    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
+//    to provide the usual futile-wakeup optimization.
+//    See RTStt for details.
+// *  Consider schedctl.sc_nopreempt to cover the critical section.
+//
+
+
+typedef volatile intptr_t MutexT ;      // Mux Lock-word
+enum MuxBits { LOCKBIT = 1 } ;
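// ----------------------------------------------------------------------------
// Sketch of the (List, LOCKBIT) word encoding described above: the low bit
// says "held", the remaining bits are the head of the chain of blocked
// waiters.  The encoding only works because waiter records are aligned, so
// their low bit is always zero.  Standalone and illustrative only; the names
// are chosen to avoid clashing with the real LOCKBIT above.

#include <stdint.h>
#include <cassert>

struct WaitNode { WaitNode * next ; } ;

const intptr_t MUX_LOCKBIT = 1 ;

inline bool       mux_is_held (intptr_t w) { return (w & MUX_LOCKBIT) != 0 ; }
inline WaitNode * mux_waiters (intptr_t w) { return (WaitNode *) (w & ~MUX_LOCKBIT) ; }

inline intptr_t mux_encode (WaitNode * head, bool held) {
  assert ((intptr_t(head) & MUX_LOCKBIT) == 0) ;   // relies on alignment
  return intptr_t(head) | (held ? MUX_LOCKBIT : 0) ;
}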
+
+void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
+  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
+  if (w == 0) return ;
+  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+     return ;
+  }
+
+  TEVENT (muxAcquire - Contention) ;
+  ParkEvent * const Self = Thread::current()->_MuxEvent ;
+  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
   for (;;) {
-    p = FreeList ;
-    if (p  == NULL) break ;
-    // 1: Detach
-    // Tantamount to p = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
-       continue ;
-    }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    Parker * List = p->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
+     int its = (os::is_MP() ? 100 : 0) + 1 ;
+
+     // Optional spin phase: spin-then-park strategy
+     while (--its >= 0) {
+       w = *Lock ;
+       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+          return ;
+       }
+     }
+
+     Self->reset() ;
+     Self->OnList = intptr_t(Lock) ;
+     // The following fence() isn't _strictly necessary as the subsequent
+     // CAS() both serializes execution and ratifies the fetched *Lock value.
+     OrderAccess::fence();
+     for (;;) {
+        w = *Lock ;
+        if ((w & LOCKBIT) == 0) {
+            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+                Self->OnList = 0 ;   // hygiene - allows stronger asserts
+                return ;
+            }
+            continue ;      // Interference -- *Lock changed -- Just retry
         }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        Parker * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
-  }
-
-  if (p != NULL) {
-    guarantee (p->AssociatedWith == NULL, "invariant") ;
-  } else {
-    // Do this the hard way -- materialize a new Parker..
-    // In rare cases an allocating thread might detach
-    // a long list -- installing null into FreeList --and
-    // then stall.  Another thread calling Allocate() would see
-    // FreeList == null and then invoke the ctor.  In this case we
-    // end up with more Parkers in circulation than we need, but
-    // the race is rare and the outcome is benign.
-    // Ideally, the # of extant Parkers is equal to the
-    // maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the
-    // freelist can be transiently inaccessible.  At worst
-    // we may end up with the # of Parkers in circulation
-    // slightly above the ideal.
-    p = new Parker() ;
-  }
-  p->AssociatedWith = t ;          // Associate p with t
-  p->FreeNext       = NULL ;
-  return p ;
-}
-
-
-void Parker::Release (Parker * p) {
-  if (p == NULL) return ;
-  guarantee (p->AssociatedWith != NULL, "invariant") ;
-  guarantee (p->FreeNext == NULL      , "invariant") ;
-  p->AssociatedWith = NULL ;
-  for (;;) {
-    // Push p onto FreeList
-    Parker * List = FreeList ;
-    p->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
+        assert (w & LOCKBIT, "invariant") ;
+        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
+        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
+     }
+
+     while (Self->OnList != 0) {
+        Self->park() ;
+     }
   }
 }
 
+void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
+  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
+  if (w == 0) return ;
+  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+    return ;
+  }
+
+  TEVENT (muxAcquire - Contention) ;
+  ParkEvent * ReleaseAfter = NULL ;
+  if (ev == NULL) {
+    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
+  }
+  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
+  for (;;) {
+    guarantee (ev->OnList == 0, "invariant") ;
+    int its = (os::is_MP() ? 100 : 0) + 1 ;
+
+    // Optional spin phase: spin-then-park strategy
+    while (--its >= 0) {
+      w = *Lock ;
+      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+        if (ReleaseAfter != NULL) {
+          ParkEvent::Release (ReleaseAfter) ;
+        }
+        return ;
+      }
+    }
+
+    ev->reset() ;
+    ev->OnList = intptr_t(Lock) ;
+    // The following fence() isn't _strictly necessary as the subsequent
+    // CAS() both serializes execution and ratifies the fetched *Lock value.
+    OrderAccess::fence();
+    for (;;) {
+      w = *Lock ;
+      if ((w & LOCKBIT) == 0) {
+        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+          ev->OnList = 0 ;
+          // We call ::Release while holding the outer lock, thus
+          // artificially lengthening the critical section.
+          // Consider deferring the ::Release() until the subsequent unlock(),
+          // after we've dropped the outer lock.
+          if (ReleaseAfter != NULL) {
+            ParkEvent::Release (ReleaseAfter) ;
+          }
+          return ;
+        }
+        continue ;      // Interference -- *Lock changed -- Just retry
+      }
+      assert (w & LOCKBIT, "invariant") ;
+      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
+      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
+    }
+
+    while (ev->OnList != 0) {
+      ev->park() ;
+    }
+  }
+}
+
+// Release() must extract a successor from the list and then wake that thread.
+// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
+// similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based
+// Release() would :
+// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
+// (B) Extract a successor from the private list "in-hand"
+// (C) attempt to CAS() the residual back into *Lock over null.
+//     If there were any newly arrived threads the CAS() would fail.
+//     In that case Release() would detach the RATs, re-merge the list in-hand
+//     with the RATs and repeat as needed.  Alternately, Release() might
+//     detach and extract a successor, but then pass the residual list to the wakee.
+//     The wakee would be responsible for reattaching and remerging before it
+//     competed for the lock.
+//
+// Both "pop" and DMR are immune from ABA corruption -- there can be
+// multiple concurrent pushers, but only one popper or detacher.
+// This implementation pops from the head of the list.  This is unfair,
+// but tends to provide excellent throughput as hot threads remain hot.
+// (We wake recently run threads first).
+
+void Thread::muxRelease (volatile intptr_t * Lock)  {
+  for (;;) {
+    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
+    assert (w & LOCKBIT, "invariant") ;
+    if (w == LOCKBIT) return ;
+    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
+    assert (List != NULL, "invariant") ;
+    assert (List->OnList == intptr_t(Lock), "invariant") ;
+    ParkEvent * nxt = List->ListNext ;
+
+    // The following CAS() releases the lock and pops the head element.
+    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
+      continue ;
+    }
+    List->OnList = 0 ;
+    OrderAccess::fence() ;
+    List->unpark () ;
+    return ;
+  }
+}
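// ----------------------------------------------------------------------------
// Sketch of the detach-modify-reattach (DMR) alternative described above,
// applied to a simple CAS-push free list of the kind ParkEvent::Allocate()
// used to manage: detach the whole list, pop one node privately, then try to
// CAS the residue back, re-merging any new arrivals (RATs) if that CAS fails.
// Standalone, using std::atomic; illustrative only.

#include <atomic>

struct FNode { FNode * next ; } ;
static std::atomic<FNode*> free_list { nullptr } ;

FNode * pop_one_dmr () {
  // (A) Detach: privatize the entire list.
  FNode * head = free_list.exchange(nullptr) ;
  if (head == nullptr) return nullptr ;

  // (B) Extract one element from the private list.
  FNode * taken    = head ;
  FNode * residual = head->next ;

  // (C) Reattach the residue, merging with any nodes pushed meanwhile.
  while (residual != nullptr) {
    FNode * expected = nullptr ;
    if (free_list.compare_exchange_strong(expected, residual)) break ;
    FNode * rats = free_list.exchange(nullptr) ;   // steal the new arrivals
    if (rats != nullptr) {
      FNode * tail = rats ;
      while (tail->next != nullptr) tail = tail->next ;
      tail->next = residual ;
      residual   = rats ;
    }
  }
  return taken ;
}
// Pushers may race with this, but the exchange() privatizes the list wholesale,
// so no recycled pointer is ever compared against -- the ABA immunity noted in
// the comment above.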
+
+
 void Threads::verify() {
   ALL_JAVA_THREADS(p) {
     p->verify();
--- a/src/share/vm/runtime/thread.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/runtime/thread.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -30,6 +30,7 @@
 class ThreadStatistics;
 class ConcurrentLocksDump;
 class ParkEvent ;
+class Parker;
 
 class ciEnv;
 class CompileThread;
@@ -544,7 +545,6 @@
   static void muxAcquire  (volatile intptr_t * Lock, const char * Name) ;
   static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
   static void muxRelease  (volatile intptr_t * Lock) ;
-
 };
 
 // Inline implementation of Thread::current()
@@ -1769,100 +1769,3 @@
   }
 };
 
-// ParkEvents are type-stable and immortal.
-//
-// Lifecycle: Once a ParkEvent is associated with a thread, that ParkEvent remains
-// associated with the thread for the thread's entire lifetime - the relationship is
-// stable. A thread will be associated with at most one ParkEvent.  When the thread
-// expires, the ParkEvent moves to the EventFreeList.  New threads attempt to allocate from
-// the EventFreeList before creating a new Event.  Type-stability frees us from
-// worrying about stale Event or Thread references in the objectMonitor subsystem.
-// (A reference to ParkEvent is always valid, even though the event may no longer be associated
-// with the desired or expected thread.  A key aspect of this design is that the callers of
-// park, unpark, etc must tolerate stale references and spurious wakeups).
-//
-// Only the "associated" thread can block (park) on the ParkEvent, although
-// any other thread can unpark a reachable ParkEvent.  Park() is allowed to
-// return spuriously.  In fact park-unpark are really just an optimization to
-// avoid unbounded spinning and surrender the CPU to be a polite system citizen.
-// A degenerate albeit "impolite" park-unpark implementation could simply return.
-// See http://blogs.sun.com/dave for more details.
-//
-// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
-// thread proxies, and simply make the THREAD structure type-stable and persistent.
-// Currently, we unpark events associated with threads, but ideally we'd just
-// unpark threads.
-//
-// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
-// platform-independent.  PlatformEvent provides park(), unpark(), etc., and
-// is abstract -- that is, a PlatformEvent should never be instantiated except
-// as part of a ParkEvent.
-// Equivalently we could have defined a platform-independent base-class that
-// exported Allocate(), Release(), etc.  The platform-specific class would extend
-// that base-class, adding park(), unpark(), etc.
-//
-// A word of caution: The JVM uses 2 very similar constructs:
-// 1. ParkEvents are used for Java-level "monitor" synchronization.
-// 2. Parkers are used by JSR166-JUC park-unpark.
-//
-// We'll want to eventually merge these redundant facilities and use ParkEvent.
-
-
-class ParkEvent : public os::PlatformEvent {
-  private:
-    ParkEvent * FreeNext ;
-
-    // Current association
-    Thread * AssociatedWith ;
-    intptr_t RawThreadIdentity ;        // LWPID etc
-    volatile int Incarnation ;
-
-    // diagnostic : keep track of last thread to wake this thread.
-    // this is useful for construction of dependency graphs.
-    void * LastWaker ;
-
-  public:
-    // MCS-CLH list linkage and Native Mutex/Monitor
-    ParkEvent * volatile ListNext ;
-    ParkEvent * volatile ListPrev ;
-    volatile intptr_t OnList ;
-    volatile int TState ;
-    volatile int Notified ;             // for native monitor construct
-    volatile int IsWaiting ;            // Enqueued on WaitSet
-
-
-  private:
-    static ParkEvent * volatile FreeList ;
-    static volatile int ListLock ;
-
-    // It's prudent to mark the dtor as "private"
-    // ensuring that it's not visible outside the package.
-    // Unfortunately gcc warns about such usage, so
-    // we revert to the less desirable "protected" visibility.
-    // The other compilers accept private dtors.
-
-  protected:        // Ensure dtor is never invoked
-    ~ParkEvent() { guarantee (0, "invariant") ; }
-
-    ParkEvent() : PlatformEvent() {
-       AssociatedWith = NULL ;
-       FreeNext       = NULL ;
-       ListNext       = NULL ;
-       ListPrev       = NULL ;
-       OnList         = 0 ;
-       TState         = 0 ;
-       Notified       = 0 ;
-       IsWaiting      = 0 ;
-    }
-
-    // We use placement-new to force ParkEvent instances to be
-    // aligned on 256-byte address boundaries.  This ensures that the least
-    // significant byte of a ParkEvent address is always 0.
-
-    void * operator new (size_t sz) ;
-    void operator delete (void * a) ;
-
-  public:
-    static ParkEvent * Allocate (Thread * t) ;
-    static void Release (ParkEvent * e) ;
-} ;
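+
+// An illustrative sketch of how operator new could produce the 256-byte
+// alignment described above -- plain ::malloc stands in for the VM's real
+// allocator, and the raw block is never freed since ParkEvents are immortal.
+// This is an assumption for illustration, not the actual implementation.
+//
+//   void * ParkEvent::operator new (size_t sz) {
+//     char * base = (char *) ::malloc (sz + 256) ;
+//     guarantee (base != NULL, "out of memory") ;
+//     // Round up so the low-order 8 bits of the returned address are zero.
+//     return (void *) ((intptr_t(base) + 256) & -256) ;
+//   }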
--- a/src/share/vm/shark/sharkCompiler.hpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/shark/sharkCompiler.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -103,8 +103,7 @@
   // Global access
  public:
   static SharkCompiler* compiler() {
-    AbstractCompiler *compiler =
-      CompileBroker::compiler(CompLevel_fast_compile);
+    AbstractCompiler *compiler = CompileBroker::compiler(CompLevel_simple);
     assert(compiler->is_shark() && compiler->is_initialized(), "should be");
     return (SharkCompiler *) compiler;
   }
--- a/src/share/vm/utilities/debug.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/utilities/debug.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -51,14 +51,16 @@
 
 
 void warning(const char* format, ...) {
-  // In case error happens before init or during shutdown
-  if (tty == NULL) ostream_init();
+  if (PrintWarnings) {
+    // In case error happens before init or during shutdown
+    if (tty == NULL) ostream_init();
 
-  tty->print("%s warning: ", VM_Version::vm_name());
-  va_list ap;
-  va_start(ap, format);
-  tty->vprint_cr(format, ap);
-  va_end(ap);
+    tty->print("%s warning: ", VM_Version::vm_name());
+    va_list ap;
+    va_start(ap, format);
+    tty->vprint_cr(format, ap);
+    va_end(ap);
+  }
   if (BreakAtWarning) BREAKPOINT;
 }
 
--- a/src/share/vm/utilities/exceptions.cpp	Thu Oct 28 14:46:29 2010 -0700
+++ b/src/share/vm/utilities/exceptions.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -61,6 +61,18 @@
    ShouldNotReachHere();
   }
 
+#ifdef ASSERT
+  // Check for a StackOverflowError thrown before initialization is complete,
+  // to prevent infinite recursion from trying to initialize StackOverflowError
+  // without adequate stack space.
+  // This can happen when stress testing with a large value of StackShadowPages.
+  if (h_exception()->klass() == SystemDictionary::StackOverflowError_klass()) {
+    instanceKlass* ik = instanceKlass::cast(h_exception->klass());
+    assert(ik->is_initialized(),
+           "need to increase min_stack_allowed calculation");
+  }
+#endif // ASSERT
+
   if (thread->is_VM_thread()
       || thread->is_Compiler_thread() ) {
     // We do not care what kind of exception we get for the vm-thread or a thread which
@@ -91,7 +103,6 @@
     thread->set_pending_exception(Universe::vm_exception(), file, line);
     return true;
   }
-
   return false;
 }
 
@@ -193,6 +204,7 @@
     klassOop k = SystemDictionary::StackOverflowError_klass();
     oop e = instanceKlass::cast(k)->allocate_instance(CHECK);
     exception = Handle(THREAD, e);  // fill_in_stack_trace does gc
+    assert(instanceKlass::cast(k)->is_initialized(), "need to increase min_stack_allowed calculation");
     if (StackTraceInThrowable) {
       java_lang_Throwable::fill_in_stack_trace(exception);
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6987555/Test6987555.java	Mon Nov 01 10:49:14 2010 -0700
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6987555
+ * @summary JSR 292 unboxing to a boolean value fails on big-endian SPARC
+ *
+ * @run main/othervm -Xint -ea -XX:+UnlockExperimentalVMOptions -XX:+EnableMethodHandles -XX:+EnableInvokeDynamic -XX:+UnlockDiagnosticVMOptions -XX:+VerifyMethodHandles Test6987555
+ */
+
+import java.dyn.*;
+
+public class Test6987555 {
+    private static final Class   CLASS = Test6987555.class;
+    private static final String  NAME  = "foo";
+    private static final boolean DEBUG = false;
+
+    public static void main(String[] args) throws Throwable {
+        testboolean();
+        testbyte();
+        testchar();
+        testshort();
+        testint();
+    }
+
+    // boolean
+    static void testboolean() throws Throwable {
+        doboolean(false);
+        doboolean(true);
+    }
+    static void doboolean(boolean x) throws Throwable {
+        if (DEBUG)  System.out.println("boolean=" + x);
+        MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(boolean.class, boolean.class));
+        MethodHandle mh2 = mh1.asType(MethodType.methodType(boolean.class, Boolean.class));
+        boolean a = mh1.<boolean>invokeExact(x);
+        boolean b = mh2.<boolean>invokeExact(Boolean.valueOf(x));
+        assert a == b : a + " != " + b;
+    }
+
+    // byte
+    static void testbyte() throws Throwable {
+        byte[] a = new byte[] {
+            Byte.MIN_VALUE,
+            Byte.MIN_VALUE + 1,
+            -0x0F,
+            -1,
+            0,
+            1,
+            0x0F,
+            Byte.MAX_VALUE - 1,
+            Byte.MAX_VALUE
+        };
+        for (int i = 0; i < a.length; i++) {
+            dobyte(a[i]);
+        }
+    }
+    static void dobyte(byte x) throws Throwable {
+        if (DEBUG)  System.out.println("byte=" + x);
+        MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(byte.class, byte.class));
+        MethodHandle mh2 = mh1.asType(MethodType.methodType(byte.class, Byte.class));
+        byte a = mh1.<byte>invokeExact(x);
+        byte b = mh2.<byte>invokeExact(Byte.valueOf(x));
+        assert a == b : a + " != " + b;
+    }
+
+    // char
+    static void testchar() throws Throwable {
+        char[] a = new char[] {
+            Character.MIN_VALUE,
+            Character.MIN_VALUE + 1,
+            0x000F,
+            0x00FF,
+            0x0FFF,
+            Character.MAX_VALUE - 1,
+            Character.MAX_VALUE
+        };
+        for (int i = 0; i < a.length; i++) {
+            dochar(a[i]);
+        }
+    }
+    static void dochar(char x) throws Throwable {
+        if (DEBUG)  System.out.println("char=" + x);
+        MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(char.class, char.class));
+        MethodHandle mh2 = mh1.asType(MethodType.methodType(char.class, Character.class));
+        char a = mh1.<char>invokeExact(x);
+        char b = mh2.<char>invokeExact(Character.valueOf(x));
+        assert a == b : a + " != " + b;
+    }
+
+    // short
+    static void testshort() throws Throwable {
+        short[] a = new short[] {
+            Short.MIN_VALUE,
+            Short.MIN_VALUE + 1,
+            -0x0FFF,
+            -0x00FF,
+            -0x000F,
+            -1,
+            0,
+            1,
+            0x000F,
+            0x00FF,
+            0x0FFF,
+            Short.MAX_VALUE - 1,
+            Short.MAX_VALUE
+        };
+        for (int i = 0; i < a.length; i++) {
+            doshort(a[i]);
+        }
+    }
+    static void doshort(short x) throws Throwable {
+        if (DEBUG)  System.out.println("short=" + x);
+        MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(short.class, short.class));
+        MethodHandle mh2 = mh1.asType(MethodType.methodType(short.class, Short.class));
+        short a = mh1.<short>invokeExact(x);
+        short b = mh2.<short>invokeExact(Short.valueOf(x));
+        assert a == b : a + " != " + b;
+    }
+
+    // int
+    static void testint() throws Throwable {
+        int[] a = new int[] {
+            Integer.MIN_VALUE,
+            Integer.MIN_VALUE + 1,
+            -0x00000FFF,
+            -0x000000FF,
+            -0x0000000F,
+            -1,
+            0,
+            1,
+            0x0000000F,
+            0x000000FF,
+            0x00000FFF,
+            Integer.MAX_VALUE - 1,
+            Integer.MAX_VALUE
+        };
+        for (int i = 0; i < a.length; i++) {
+            doint(a[i]);
+        }
+    }
+    static void doint(int x) throws Throwable {
+        if (DEBUG)  System.out.println("int=" + x);
+        MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(int.class, int.class));
+        MethodHandle mh2 = mh1.asType(MethodType.methodType(int.class, Integer.class));
+        int a = mh1.<int>invokeExact(x);
+        int b = mh2.<int>invokeExact(Integer.valueOf(x));
+        assert a == b : a + " != " + b;
+    }
+
+    public static boolean foo(boolean i) { return i; }
+    public static byte    foo(byte    i) { return i; }
+    public static char    foo(char    i) { return i; }
+    public static short   foo(short   i) { return i; }
+    public static int     foo(int     i) { return i; }
+}