changeset 982:7a102acc9f17

Merge
author trims
date Fri, 25 Sep 2009 12:17:06 -0700
parents 89e0543e1737 (current diff) 1af62b6ca0f9 (diff)
children faf94d94786b
files src/cpu/x86/vm/c1_LIRGenerator_x86.cpp src/share/vm/gc_implementation/shared/markSweep.cpp src/share/vm/prims/jvm.h
diffstat 57 files changed, 515 insertions(+), 198 deletions(-)
--- a/make/linux/makefiles/jvmti.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/linux/makefiles/jvmti.make	Fri Sep 25 12:17:06 2009 -0700
@@ -70,10 +70,10 @@
 both = $(JvmtiGenClass) $(JvmtiSrcDir)/jvmti.xml $(JvmtiSrcDir)/jvmtiLib.xsl
 
 $(JvmtiGenClass): $(JvmtiGenSource)
-	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -g -d $(JvmtiOutDir) $(JvmtiGenSource)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiGenSource)
 
 $(JvmtiEnvFillClass): $(JvmtiEnvFillSource)
-	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -g -d $(JvmtiOutDir) $(JvmtiEnvFillSource)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiEnvFillSource)
 
 $(JvmtiOutDir)/jvmtiEnter.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
 	@echo Generating $@
--- a/make/linux/makefiles/mapfile-vers-debug	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/linux/makefiles/mapfile-vers-debug	Fri Sep 25 12:17:06 2009 -0700
@@ -74,6 +74,7 @@
                 JVM_CurrentTimeMillis;
                 JVM_DefineClass;
                 JVM_DefineClassWithSource;
+                JVM_DefineClassWithSourceCond;
                 JVM_DesiredAssertionStatus;
                 JVM_DisableCompiler;
                 JVM_DoPrivileged;
--- a/make/linux/makefiles/mapfile-vers-product	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/linux/makefiles/mapfile-vers-product	Fri Sep 25 12:17:06 2009 -0700
@@ -74,6 +74,7 @@
                 JVM_CurrentTimeMillis;
                 JVM_DefineClass;
                 JVM_DefineClassWithSource;
+                JVM_DefineClassWithSourceCond;
                 JVM_DesiredAssertionStatus;
                 JVM_DisableCompiler;
                 JVM_DoPrivileged;
--- a/make/linux/makefiles/rules.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/linux/makefiles/rules.make	Fri Sep 25 12:17:06 2009 -0700
@@ -122,12 +122,20 @@
 endif
 endif
 
+COMPILE.JAVAC += $(BOOTSTRAP_JAVAC_FLAGS)
+
 SUM = /usr/bin/sum
 
 # 'gmake MAKE_VERBOSE=y' gives all the gory details.
 QUIETLY$(MAKE_VERBOSE)  = @
 RUN.JAR$(MAKE_VERBOSE) += >/dev/null
 
+# Settings for javac
+BOOT_SOURCE_LANGUAGE_VERSION = 6
+BOOT_TARGET_CLASS_VERSION = 6
+JAVAC_FLAGS = -g -encoding ascii
+BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
+
 # With parallel makes, print a message at the end of compilation.
 ifeq    ($(findstring j,$(MFLAGS)),j)
 COMPILE_DONE    = && { echo Done with $<; }
--- a/make/linux/makefiles/sa.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/linux/makefiles/sa.make	Fri Sep 25 12:17:06 2009 -0700
@@ -74,8 +74,8 @@
 	  mkdir -p $(SA_CLASSDIR);        \
 	fi
 
-	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1)
-	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2)
 
 	$(QUIETLY) $(REMOTE) $(COMPILE.RMIC)  -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
--- a/make/linux/makefiles/top.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/linux/makefiles/top.make	Fri Sep 25 12:17:06 2009 -0700
@@ -24,7 +24,7 @@
 
 # top.make is included in the Makefile in the build directories.
 # It DOES NOT include the vm dependency info in order to be faster.
-# It's main job is to implement the incremental form of make lists.
+# Its main job is to implement the incremental form of make lists.
 # It also:
 #   -builds and runs adlc via adlc.make
 #   -generates JVMTI source and docs via jvmti.make (JSR-163)
@@ -114,7 +114,7 @@
 # make makeDeps: (and zap the cached db files to force a nonincremental run)
 
 $(GENERATED)/$(MakeDepsClass): $(MakeDepsSources)
-	@$(REMOTE) $(COMPILE.JAVAC) -classpath $(GAMMADIR)/src/share/tools/MakeDeps -g -d $(GENERATED) $(MakeDepsSources)
+	@$(REMOTE) $(COMPILE.JAVAC) -classpath $(GAMMADIR)/src/share/tools/MakeDeps -d $(GENERATED) $(MakeDepsSources)
 	@echo Removing $(Incremental_Lists) to force regeneration.
 	@rm -f $(Incremental_Lists)
 	@$(CDG) echo >$(Cached_plat)
--- a/make/solaris/makefiles/jvmti.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/solaris/makefiles/jvmti.make	Fri Sep 25 12:17:06 2009 -0700
@@ -69,10 +69,10 @@
 both = $(JvmtiGenClass) $(JvmtiSrcDir)/jvmti.xml $(JvmtiSrcDir)/jvmtiLib.xsl
 
 $(JvmtiGenClass): $(JvmtiGenSource)
-	$(QUIETLY) $(COMPILE.JAVAC) -g -d $(JvmtiOutDir) $(JvmtiGenSource)
+	$(QUIETLY) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiGenSource)
 
 $(JvmtiEnvFillClass): $(JvmtiEnvFillSource)
-	$(QUIETLY) $(COMPILE.JAVAC) -g -d $(JvmtiOutDir) $(JvmtiEnvFillSource)
+	$(QUIETLY) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiEnvFillSource)
 
 $(JvmtiOutDir)/jvmtiEnter.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
 	@echo Generating $@
--- a/make/solaris/makefiles/mapfile-vers	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/solaris/makefiles/mapfile-vers	Fri Sep 25 12:17:06 2009 -0700
@@ -74,6 +74,7 @@
 		JVM_CurrentTimeMillis;
 		JVM_DefineClass;
 		JVM_DefineClassWithSource;
+		JVM_DefineClassWithSourceCond;
 		JVM_DesiredAssertionStatus;
 		JVM_DisableCompiler;
 		JVM_DoPrivileged;
--- a/make/solaris/makefiles/rules.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/solaris/makefiles/rules.make	Fri Sep 25 12:17:06 2009 -0700
@@ -122,12 +122,20 @@
 endif
 endif
 
+COMPILE.JAVAC += $(BOOTSTRAP_JAVAC_FLAGS)
+
 SUM = /usr/bin/sum
 
 # 'gmake MAKE_VERBOSE=y' gives all the gory details.
 QUIETLY$(MAKE_VERBOSE)  = @
 RUN.JAR$(MAKE_VERBOSE) += >/dev/null
 
+# Settings for javac
+BOOT_SOURCE_LANGUAGE_VERSION = 6
+BOOT_TARGET_CLASS_VERSION = 6
+JAVAC_FLAGS = -g -encoding ascii
+BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
+
 # With parallel makes, print a message at the end of compilation.
 ifeq    ($(findstring j,$(MFLAGS)),j)
 COMPILE_DONE    = && { echo Done with $<; }
--- a/make/solaris/makefiles/sa.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/solaris/makefiles/sa.make	Fri Sep 25 12:17:06 2009 -0700
@@ -67,8 +67,8 @@
 	$(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
 	  mkdir -p $(SA_CLASSDIR);        \
 	fi
-	$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1)
-	$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2)
+	$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1)
+	$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2)
 	
 	$(QUIETLY) $(COMPILE.RMIC)  -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
--- a/make/solaris/makefiles/top.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/solaris/makefiles/top.make	Fri Sep 25 12:17:06 2009 -0700
@@ -24,7 +24,7 @@
 
 # top.make is included in the Makefile in the build directories.
 # It DOES NOT include the vm dependency info in order to be faster.
-# It's main job is to implement the incremental form of make lists.
+# Its main job is to implement the incremental form of make lists.
 # It also:
 #   -builds and runs adlc via adlc.make
 #   -generates JVMTI source and docs via jvmti.make (JSR-163)
@@ -112,7 +112,7 @@
 # make makeDeps: (and zap the cached db files to force a nonincremental run)
 
 $(GENERATED)/$(MakeDepsClass): $(MakeDepsSources)
-	@$(COMPILE.JAVAC) -classpath $(GAMMADIR)/src/share/tools/MakeDeps -g -d $(GENERATED) $(MakeDepsSources)
+	@$(COMPILE.JAVAC) -classpath $(GAMMADIR)/src/share/tools/MakeDeps -d $(GENERATED) $(MakeDepsSources)
 	@echo Removing $(Incremental_Lists) to force regeneration.
 	@rm -f $(Incremental_Lists)
 	@$(CDG) echo >$(Cached_plat)
--- a/make/windows/makefiles/generated.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/windows/makefiles/generated.make	Fri Sep 25 12:17:06 2009 -0700
@@ -91,7 +91,7 @@
 classes/MakeDeps.class: $(MakeDepsSources)
 	if exist classes rmdir /s /q classes
 	mkdir classes
-	$(COMPILE_JAVAC) -classpath $(WorkSpace)\src\share\tools\MakeDeps -g -d classes $(MakeDepsSources)
+	$(COMPILE_JAVAC) -classpath $(WorkSpace)\src\share\tools\MakeDeps -d classes $(MakeDepsSources)
 
 !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered")
 
--- a/make/windows/makefiles/jvmti.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/windows/makefiles/jvmti.make	Fri Sep 25 12:17:06 2009 -0700
@@ -68,10 +68,10 @@
         @if not exist $(JvmtiOutDir) mkdir $(JvmtiOutDir)
 
 $(JvmtiGenClass): $(JvmtiGenSource)
-	$(COMPILE_JAVAC) -g -d $(JvmtiOutDir) $(JvmtiGenSource)
+	$(COMPILE_JAVAC) -d $(JvmtiOutDir) $(JvmtiGenSource)
 
 $(JvmtiEnvFillClass): $(JvmtiEnvFillSource)
-	@$(COMPILE_JAVAC) -g -d $(JvmtiOutDir) $(JvmtiEnvFillSource)
+	@$(COMPILE_JAVAC) -d $(JvmtiOutDir) $(JvmtiEnvFillSource)
 
 $(JvmtiOutDir)/jvmtiEnter.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
 	@echo Generating $@
--- a/make/windows/makefiles/rules.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/windows/makefiles/rules.make	Fri Sep 25 12:17:06 2009 -0700
@@ -29,7 +29,7 @@
 RUN_JAVAP=$(BootStrapDir)\bin\javap
 RUN_JAVAH=$(BootStrapDir)\bin\javah
 RUN_JAR=$(BootStrapDir)\bin\jar
-COMPILE_JAVAC=$(BootStrapDir)\bin\javac
+COMPILE_JAVAC=$(BootStrapDir)\bin\javac $(BOOTSTRAP_JAVAC_FLAGS)
 COMPILE_RMIC=$(BootStrapDir)\bin\rmic
 BOOT_JAVA_HOME=$(BootStrapDir)
 !else
@@ -37,11 +37,17 @@
 RUN_JAVAP=javap
 RUN_JAVAH=javah
 RUN_JAR=jar
-COMPILE_JAVAC=javac
+COMPILE_JAVAC=javac $(BOOTSTRAP_JAVAC_FLAGS)
 COMPILE_RMIC=rmic
 BOOT_JAVA_HOME=
 !endif
 
+# Settings for javac
+BOOT_SOURCE_LANGUAGE_VERSION=6
+BOOT_TARGET_CLASS_VERSION=6
+JAVAC_FLAGS=-g -encoding ascii
+BOOTSTRAP_JAVAC_FLAGS=$(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
+
 ProjectFile=vm.vcproj
 
 !if "$(MSC_VER)" == "1200"
--- a/make/windows/makefiles/sa.make	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/windows/makefiles/sa.make	Fri Sep 25 12:17:06 2009 -0700
@@ -55,9 +55,9 @@
 $(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\)
 	@if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR)
 	@echo ...Building sa-jdi.jar
-	@echo ...$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) ....
-	@$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\)
-	@$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\)
+	@echo ...$(COMPILE_JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -d $(SA_CLASSDIR) ....
+	@$(COMPILE_JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\)
+	@$(COMPILE_JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\)
 	$(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	$(QUIETLY) echo $(SA_BUILD_VERSION_PROP)> $(SA_PROPERTIES)
 	$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
--- a/make/windows/projectfiles/common/Makefile	Tue Sep 22 14:06:10 2009 -0700
+++ b/make/windows/projectfiles/common/Makefile	Fri Sep 25 12:17:06 2009 -0700
@@ -179,6 +179,6 @@
 $(HOTSPOTBUILDSPACE)/classes/MakeDeps.class: $(MakeDepsSources)
 	@if exist $(HOTSPOTBUILDSPACE)\classes rmdir /s /q $(HOTSPOTBUILDSPACE)\classes
 	@mkdir $(HOTSPOTBUILDSPACE)\classes
-	@$(COMPILE_JAVAC) -classpath $(HOTSPOTWORKSPACE)\src\share\tools\MakeDeps -g -d $(HOTSPOTBUILDSPACE)/classes $(MakeDepsSources)
+	@$(COMPILE_JAVAC) -classpath $(HOTSPOTWORKSPACE)\src\share\tools\MakeDeps -d $(HOTSPOTBUILDSPACE)/classes $(MakeDepsSources)
 
 FORCE:
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -1696,6 +1696,9 @@
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register scratch,
                                         int start_row, Label& done) {
+  if (TypeProfileWidth == 0)
+    return;
+
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
   // Test this row for both the receiver and for null.
--- a/src/cpu/sparc/vm/sparc.ad	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Fri Sep 25 12:17:06 2009 -0700
@@ -5707,7 +5707,7 @@
   effect(TEMP dst, TEMP tmp);
   ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
 
-  size(3*4);
+  size((3+1)*4);  // set may use two instructions.
   format %{ "LDUH   $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
             "SET    $mask,$tmp\n\t"
             "AND    $dst,$tmp,$dst" %}
@@ -5851,7 +5851,7 @@
   effect(TEMP dst, TEMP tmp);
   ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
 
-  size(3*4);
+  size((3+1)*4);  // set may use two instructions.
   format %{ "LDUW   $mem,$dst\t! int & 32-bit mask -> long\n\t"
             "SET    $mask,$tmp\n\t"
             "AND    $dst,$tmp,$dst" %}
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -827,8 +827,8 @@
     case vmIntrinsics::_dsin:   __ sin  (calc_input, calc_result, tmp1, tmp2);              break;
     case vmIntrinsics::_dcos:   __ cos  (calc_input, calc_result, tmp1, tmp2);              break;
     case vmIntrinsics::_dtan:   __ tan  (calc_input, calc_result, tmp1, tmp2);              break;
-    case vmIntrinsics::_dlog:   __ log  (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
-    case vmIntrinsics::_dlog10: __ log10(calc_input, calc_result, LIR_OprFact::illegalOpr); break;
+    case vmIntrinsics::_dlog:   __ log  (calc_input, calc_result, tmp1);                    break;
+    case vmIntrinsics::_dlog10: __ log10(calc_input, calc_result, tmp1);                    break;
     default:                    ShouldNotReachHere();
   }
 
--- a/src/cpu/x86/vm/c1_LinearScan_x86.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/cpu/x86/vm/c1_LinearScan_x86.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -764,8 +764,6 @@
       break;
     }
 
-    case lir_log:
-    case lir_log10:
     case lir_abs:
     case lir_sqrt: {
       // Right argument appears to be unused
@@ -785,6 +783,30 @@
       break;
     }
 
+    case lir_log:
+    case lir_log10: {
+      // log and log10 need one temporary fpu stack slot, so there is one
+      // temporary register stored in temp of the operation.
+      // the stack allocator must guarantee that the stack slots are really free,
+      // otherwise there might be a stack overflow.
+      assert(right->is_illegal(), "must be");
+      assert(left->is_fpu_register(), "must be");
+      assert(res->is_fpu_register(), "must be");
+      assert(op2->tmp_opr()->is_fpu_register(), "must be");
+
+      insert_free_if_dead(op2->tmp_opr());
+      insert_free_if_dead(res, left);
+      insert_exchange(left);
+      do_rename(left, res);
+
+      new_left = to_fpu_stack_top(res);
+      new_res = new_left;
+
+      op2->set_fpu_stack_size(sim()->stack_size());
+      assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
+      break;
+    }
+
 
     case lir_tan:
     case lir_sin:
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -1262,6 +1262,9 @@
                                         Register receiver, Register mdp,
                                         Register reg2,
                                         int start_row, Label& done) {
+  if (TypeProfileWidth == 0)
+    return;
+
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
   // Test this row for both the receiver and for null.
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -1272,6 +1272,9 @@
                                         Register receiver, Register mdp,
                                         Register reg2,
                                         int start_row, Label& done) {
+  if (TypeProfileWidth == 0)
+    return;
+
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
   // Test this row for both the receiver and for null.
--- a/src/os/windows/vm/os_windows.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -1526,7 +1526,8 @@
     case 5000: st->print(" Windows 2000"); break;
     case 5001: st->print(" Windows XP"); break;
     case 5002:
-    case 6000: {
+    case 6000:
+    case 6001: {
       // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
       // find out whether we are running on 64 bit processor or not.
       SYSTEM_INFO si;
@@ -1549,13 +1550,27 @@
           st->print(" Windows XP x64 Edition");
         else
             st->print(" Windows Server 2003 family");
-      } else { // os_vers == 6000
+      } else if (os_vers == 6000) {
         if (osvi.wProductType == VER_NT_WORKSTATION)
             st->print(" Windows Vista");
         else
             st->print(" Windows Server 2008");
         if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
             st->print(" , 64 bit");
+      } else if (os_vers == 6001) {
+        if (osvi.wProductType == VER_NT_WORKSTATION) {
+            st->print(" Windows 7");
+        } else {
+            // Unrecognized windows, print out its major and minor versions
+            st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
+        }
+        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
+            st->print(" , 64 bit");
+      } else { // future os
+        // Unrecognized windows, print out its major and minor versions
+        st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
+        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
+            st->print(" , 64 bit");
       }
       break;
     }
--- a/src/share/vm/adlc/archDesc.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/adlc/archDesc.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -1031,7 +1031,8 @@
 //---------------------------addSUNcopyright-------------------------------
 // output SUN copyright info
 void ArchDesc::addSunCopyright(char* legal, int size, FILE *fp) {
-  fwrite(legal, size, 1, fp);
+  size_t count = fwrite(legal, 1, size, fp);
+  assert(count == (size_t) size, "copyright info truncated");
   fprintf(fp,"\n");
   fprintf(fp,"// Machine Generated File.  Do Not Edit!\n");
   fprintf(fp,"\n");
--- a/src/share/vm/c1/c1_LIR.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/c1/c1_LIR.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -567,8 +567,6 @@
     case lir_rem:
     case lir_sqrt:
     case lir_abs:
-    case lir_log:
-    case lir_log10:
     case lir_logic_and:
     case lir_logic_or:
     case lir_logic_xor:
@@ -644,13 +642,16 @@
 
     case lir_tan:
     case lir_sin:
-    case lir_cos: {
+    case lir_cos:
+    case lir_log:
+    case lir_log10: {
       assert(op->as_Op2() != NULL, "must be");
       LIR_Op2* op2 = (LIR_Op2*)op;
 
-      // sin and cos need two temporary fpu stack slots, so register
-      // two temp operands.  Register input operand as temp to
-      // guarantee that they do not overlap
+      // On x86 tan/sin/cos need two temporary fpu stack slots and
+      // log/log10 need one so handle opr2 and tmp as temp inputs.
+      // Register input operand as temp to guarantee that it doesn't
+      // overlap with the input.
       assert(op2->_info == NULL, "not used");
       assert(op2->_opr1->is_valid(), "used");
       do_input(op2->_opr1); do_temp(op2->_opr1);
--- a/src/share/vm/c1/c1_LIR.hpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/c1/c1_LIR.hpp	Fri Sep 25 12:17:06 2009 -0700
@@ -1840,8 +1840,8 @@
 
   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
-  void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, tmp, to)); }
-  void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, tmp, to)); }
+  void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, LIR_OprFact::illegalOpr, to, tmp)); }
+  void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
   void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
   void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
   void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
--- a/src/share/vm/classfile/classFileParser.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -2547,6 +2547,7 @@
                                                     KlassHandle host_klass,
                                                     GrowableArray<Handle>* cp_patches,
                                                     symbolHandle& parsed_name,
+                                                    bool verify,
                                                     TRAPS) {
   // So that JVMTI can cache class file in the state before retransformable agents
   // have modified it
@@ -2591,7 +2592,7 @@
   instanceKlassHandle nullHandle;
 
   // Figure out whether we can skip format checking (matching classic VM behavior)
-  _need_verify = Verifier::should_verify_for(class_loader());
+  _need_verify = Verifier::should_verify_for(class_loader(), verify);
 
   // Set the verify flag in stream
   cfs->set_verify(_need_verify);
@@ -3210,6 +3211,7 @@
 
     // Fill in information already parsed
     this_klass->set_access_flags(access_flags);
+    this_klass->set_should_verify_class(verify);
     jint lh = Klass::instance_layout_helper(instance_size, false);
     this_klass->set_layout_helper(lh);
     assert(this_klass->oop_is_instance(), "layout is correct");
@@ -3229,6 +3231,12 @@
       this_klass->set_has_final_method();
     }
     this_klass->set_method_ordering(method_ordering());
+    // The instanceKlass::_methods_jmethod_ids cache and the
+    // instanceKlass::_methods_cached_itable_indices cache are
+    // both managed on the assumption that the initial cache
+    // size is equal to the number of methods in the class. If
+    // that changes, then instanceKlass::idnum_can_increment()
+    // has to be changed accordingly.
     this_klass->set_initial_method_idnum(methods->length());
     this_klass->set_name(cp->klass_name_at(this_class_index));
     if (LinkWellKnownClasses || is_anonymous())  // I am well known to myself
--- a/src/share/vm/classfile/classFileParser.hpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/classfile/classFileParser.hpp	Fri Sep 25 12:17:06 2009 -0700
@@ -260,9 +260,10 @@
                                      Handle class_loader,
                                      Handle protection_domain,
                                      symbolHandle& parsed_name,
+                                     bool verify,
                                      TRAPS) {
     KlassHandle no_host_klass;
-    return parseClassFile(name, class_loader, protection_domain, no_host_klass, NULL, parsed_name, THREAD);
+    return parseClassFile(name, class_loader, protection_domain, no_host_klass, NULL, parsed_name, verify, THREAD);
   }
   instanceKlassHandle parseClassFile(symbolHandle name,
                                      Handle class_loader,
@@ -270,6 +271,7 @@
                                      KlassHandle host_klass,
                                      GrowableArray<Handle>* cp_patches,
                                      symbolHandle& parsed_name,
+                                     bool verify,
                                      TRAPS);
 
   // Verifier checks
--- a/src/share/vm/classfile/classLoader.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/classfile/classLoader.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -874,6 +874,7 @@
                                                        class_loader,
                                                        protection_domain,
                                                        parsed_name,
+                                                       false,
                                                        CHECK_(h));
 
     // add to package table
--- a/src/share/vm/classfile/javaClasses.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -252,7 +252,7 @@
   typeArrayOop value  = java_lang_String::value(obj);
   int          offset = java_lang_String::offset(obj);
   int          length = java_lang_String::length(obj);
-  jchar* base = value->char_at_addr(offset);
+  jchar* base = (length == 0) ? NULL : value->char_at_addr(offset);
   symbolOop sym = SymbolTable::lookup_unicode(base, length, THREAD);
   return symbolHandle(THREAD, sym);
 }
@@ -261,7 +261,7 @@
   typeArrayOop value  = java_lang_String::value(java_string);
   int          offset = java_lang_String::offset(java_string);
   int          length = java_lang_String::length(java_string);
-  jchar* base = value->char_at_addr(offset);
+  jchar* base = (length == 0) ? NULL : value->char_at_addr(offset);
   return SymbolTable::probe_unicode(base, length);
 }
 
--- a/src/share/vm/classfile/systemDictionary.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -970,6 +970,7 @@
                                                              host_klass,
                                                              cp_patches,
                                                              parsed_name,
+                                                             true,
                                                              THREAD);
 
 
@@ -1025,6 +1026,7 @@
                                                Handle class_loader,
                                                Handle protection_domain,
                                                ClassFileStream* st,
+                                               bool verify,
                                                TRAPS) {
 
   // Classloaders that support parallelism, e.g. bootstrap classloader,
@@ -1055,6 +1057,7 @@
                                                              class_loader,
                                                              protection_domain,
                                                              parsed_name,
+                                                             verify,
                                                              THREAD);
 
   const char* pkg = "java/";
--- a/src/share/vm/classfile/systemDictionary.hpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Fri Sep 25 12:17:06 2009 -0700
@@ -259,7 +259,9 @@
                                TRAPS);
 
   // Resolve from stream (called by jni_DefineClass and JVM_DefineClass)
-  static klassOop resolve_from_stream(symbolHandle class_name, Handle class_loader, Handle protection_domain, ClassFileStream* st, TRAPS);
+  static klassOop resolve_from_stream(symbolHandle class_name, Handle class_loader,
+                                      Handle protection_domain,
+                                      ClassFileStream* st, bool verify, TRAPS);
 
   // Lookup an already loaded class. If not found NULL is returned.
   static klassOop find(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS);
--- a/src/share/vm/classfile/verifier.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/classfile/verifier.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -53,8 +53,8 @@
 
 // Methods in Verifier
 
-bool Verifier::should_verify_for(oop class_loader) {
-  return class_loader == NULL ?
+bool Verifier::should_verify_for(oop class_loader, bool should_verify_class) {
+  return (class_loader == NULL || !should_verify_class) ?
     BytecodeVerificationLocal : BytecodeVerificationRemote;
 }
 
@@ -68,7 +68,7 @@
   return !need_verify;
 }
 
-bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, TRAPS) {
+bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, TRAPS) {
   ResourceMark rm(THREAD);
   HandleMark hm;
 
@@ -81,7 +81,7 @@
   // If the class should be verified, first see if we can use the split
   // verifier.  If not, or if verification fails and FailOverToOldVerifier
   // is set, then call the inference verifier.
-  if (is_eligible_for_verification(klass)) {
+  if (is_eligible_for_verification(klass, should_verify_class)) {
     if (TraceClassInitialization) {
       tty->print_cr("Start class verification for: %s", klassName);
     }
@@ -141,12 +141,13 @@
   }
 }
 
-bool Verifier::is_eligible_for_verification(instanceKlassHandle klass) {
+bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
   symbolOop name = klass->name();
   klassOop refl_magic_klass = SystemDictionary::reflect_magic_klass();
 
-  return (should_verify_for(klass->class_loader()) &&
+  return (should_verify_for(klass->class_loader(), should_verify_class) &&
     // return if the class is a bootstrapping class
+    // or defineClass specified not to verify by default (flags override passed arg)
     // We need to skip the following four for bootstraping
     name != vmSymbols::java_lang_Object() &&
     name != vmSymbols::java_lang_Class() &&
--- a/src/share/vm/classfile/verifier.hpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/classfile/verifier.hpp	Fri Sep 25 12:17:06 2009 -0700
@@ -34,16 +34,18 @@
    * Otherwise, no exception is thrown and the return indicates the
    * error.
    */
-  static bool verify(instanceKlassHandle klass, Mode mode, TRAPS);
+  static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, TRAPS);
 
-  // Return false if the class is loaded by the bootstrap loader.
-  static bool should_verify_for(oop class_loader);
+  // Return false if the class is loaded by the bootstrap loader,
+  // or if defineClass was called requesting skipping verification
+  // -Xverify:all/none override this value
+  static bool should_verify_for(oop class_loader, bool should_verify_class);
 
   // Relax certain verifier checks to enable some broken 1.1 apps to run on 1.2.
   static bool relax_verify_for(oop class_loader);
 
  private:
-  static bool is_eligible_for_verification(instanceKlassHandle klass);
+  static bool is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class);
   static symbolHandle inference_verify(
     instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS);
 };
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -302,9 +302,9 @@
     if (cur->isHumongous()) {
       return MemRegion(last_start, end);
     }
-    cur->reset_zero_fill();
     assert(cur == _regions.top(), "Should be top");
     if (!cur->is_empty()) break;
+    cur->reset_zero_fill();
     shrink_bytes -= cur->capacity();
     num_regions_deleted++;
     _regions.pop();
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -2752,7 +2752,6 @@
   cm->revisit_klass_stack()->push(k);
 }
 
-#if ( defined(COMPILER1) || defined(COMPILER2) )
 void PSParallelCompact::revisit_mdo(ParCompactionManager* cm, DataLayout* p) {
   cm->revisit_mdo_stack()->push(p);
 }
@@ -2778,7 +2777,6 @@
     follow_stack(cm);
   }
 }
-#endif //  ( COMPILER1 || COMPILER2 )
 
 
 #ifdef VALIDATE_MARK_SWEEP
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -73,7 +73,6 @@
   follow_stack();
 }
 
-#if ( defined(COMPILER1) || defined(COMPILER2) )
 void MarkSweep::revisit_mdo(DataLayout* p) {
   _revisit_mdo_stack->push(p);
 }
@@ -92,7 +91,6 @@
   }
   follow_stack();
 }
-#endif //  ( COMPILER1 || COMPILER2 )
 
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
 
--- a/src/share/vm/oops/instanceKlass.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -110,7 +110,7 @@
   // 1) Verify the bytecodes
   Verifier::Mode mode =
     throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
-  return Verifier::verify(this_oop, mode, CHECK_false);
+  return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
 }
 
 
@@ -967,33 +967,78 @@
 
 
 // Lookup or create a jmethodID.
-// This code can be called by the VM thread.  For this reason it is critical that
-// there are no blocking operations (safepoints) while the lock is held -- or a
-// deadlock can occur.
-jmethodID instanceKlass::jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h) {
+// This code is called by the VMThread and JavaThreads so the
+// locking has to be done very carefully to avoid deadlocks
+// and/or other cache consistency problems.
+//
+jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
   size_t idnum = (size_t)method_h->method_idnum();
   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
   size_t length = 0;
   jmethodID id = NULL;
-  // array length stored in first element, other elements offset by one
-  if (jmeths == NULL ||                         // If there is no jmethodID array,
-      (length = (size_t)jmeths[0]) <= idnum ||  // or if it is too short,
-      (id = jmeths[idnum+1]) == NULL) {         // or if this jmethodID isn't allocated
+
+  // We use a double-check locking idiom here because this cache is
+  // performance sensitive. In the normal system, this cache only
+  // transitions from NULL to non-NULL which is safe because we use
+  // release_set_methods_jmethod_ids() to advertise the new cache.
+  // A partially constructed cache should never be seen by a racing
+  // thread. We also use release_store_ptr() to save a new jmethodID
+  // in the cache so a partially constructed jmethodID should never be
+  // seen either. Cache reads of existing jmethodIDs proceed without a
+  // lock, but cache writes of a new jmethodID requires uniqueness and
+  // creation of the cache itself requires no leaks so a lock is
+  // generally acquired in those two cases.
+  //
+  // If the RedefineClasses() API has been used, then this cache can
+  // grow and we'll have transitions from non-NULL to bigger non-NULL.
+  // Cache creation requires no leaks and we require safety between all
+  // cache accesses and freeing of the old cache so a lock is generally
+  // acquired when the RedefineClasses() API has been used.
 
-    // Do all the safepointing things (allocations) before grabbing the lock.
-    // These allocations will have to be freed if they are unused.
+  if (jmeths != NULL) {
+    // the cache already exists
+    if (!ik_h->idnum_can_increment()) {
+      // the cache can't grow so we can just get the current values
+      get_jmethod_id_length_value(jmeths, idnum, &length, &id);
+    } else {
+      // cache can grow so we have to be more careful
+      if (Threads::number_of_threads() == 0 ||
+          SafepointSynchronize::is_at_safepoint()) {
+        // we're single threaded or at a safepoint - no locking needed
+        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
+      } else {
+        MutexLocker ml(JmethodIdCreation_lock);
+        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
+      }
+    }
+  }
+  // implied else:
+  // we need to allocate a cache so default length and id values are good
 
-    // Allocate a new array of methods.
+  if (jmeths == NULL ||   // no cache yet
+      length <= idnum ||  // cache is too short
+      id == NULL) {       // cache doesn't contain entry
+
+    // This function can be called by the VMThread so we have to do all
+    // things that might block on a safepoint before grabbing the lock.
+    // Otherwise, we can deadlock with the VMThread or have a cache
+    // consistency issue. These vars keep track of what we might have
+    // to free after the lock is dropped.
+    jmethodID  to_dealloc_id     = NULL;
+    jmethodID* to_dealloc_jmeths = NULL;
+
+    // may not allocate new_jmeths or use it if we allocate it
     jmethodID* new_jmeths = NULL;
     if (length <= idnum) {
-      // A new array will be needed (unless some other thread beats us to it)
+      // allocate a new cache that might be used
       size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
       new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
       memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
-      new_jmeths[0] =(jmethodID)size;  // array size held in the first element
+      // cache size is stored in element[0], other elements offset by one
+      new_jmeths[0] = (jmethodID)size;
     }
 
-    // Allocate a new method ID.
+    // allocate a new jmethodID that might be used
     jmethodID new_id = NULL;
     if (method_h->is_old() && !method_h->is_obsolete()) {
       // The method passed in is old (but not obsolete), we need to use the current version
@@ -1007,63 +1052,111 @@
       new_id = JNIHandles::make_jmethod_id(method_h);
     }
 
-    if (Threads::number_of_threads() == 0 || SafepointSynchronize::is_at_safepoint()) {
-      // No need and unsafe to lock the JmethodIdCreation_lock at safepoint.
-      id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
+    if (Threads::number_of_threads() == 0 ||
+        SafepointSynchronize::is_at_safepoint()) {
+      // we're single threaded or at a safepoint - no locking needed
+      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
+                                          &to_dealloc_id, &to_dealloc_jmeths);
     } else {
       MutexLocker ml(JmethodIdCreation_lock);
-      id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
+      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
+                                          &to_dealloc_id, &to_dealloc_jmeths);
+    }
+
+    // The lock has been dropped so we can free resources.
+    // Free up either the old cache or the new cache if we allocated one.
+    if (to_dealloc_jmeths != NULL) {
+      FreeHeap(to_dealloc_jmeths);
+    }
+    // free up the new ID since it wasn't needed
+    if (to_dealloc_id != NULL) {
+      JNIHandles::destroy_jmethod_id(to_dealloc_id);
     }
   }
   return id;
 }
 
 
-jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, size_t idnum,
-                                        jmethodID new_id, jmethodID* new_jmeths) {
-  // Retry lookup after we got the lock or ensured we are at safepoint
+// Common code to fetch the jmethodID from the cache or update the
+// cache with the new jmethodID. This function should never do anything
+// that causes the caller to go to a safepoint or we can deadlock with
+// the VMThread or have cache consistency issues.
+//
+jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
+            instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
+            jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
+            jmethodID** to_dealloc_jmeths_p) {
+  assert(new_id != NULL, "sanity check");
+  assert(to_dealloc_id_p != NULL, "sanity check");
+  assert(to_dealloc_jmeths_p != NULL, "sanity check");
+  assert(Threads::number_of_threads() == 0 ||
+         SafepointSynchronize::is_at_safepoint() ||
+         JmethodIdCreation_lock->owned_by_self(), "sanity check");
+
+  // reacquire the cache - we are locked, single threaded or at a safepoint
   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
-  jmethodID  id                = NULL;
-  jmethodID  to_dealloc_id     = NULL;
-  jmethodID* to_dealloc_jmeths = NULL;
-  size_t     length;
+  jmethodID  id     = NULL;
+  size_t     length = 0;
 
-  if (jmeths == NULL || (length = (size_t)jmeths[0]) <= idnum) {
+  if (jmeths == NULL ||                         // no cache yet
+      (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
     if (jmeths != NULL) {
-      // We have grown the array: copy the existing entries, and delete the old array
+      // copy any existing entries from the old cache
       for (size_t index = 0; index < length; index++) {
         new_jmeths[index+1] = jmeths[index+1];
       }
-      to_dealloc_jmeths = jmeths; // using the new jmeths, deallocate the old one
+      *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
     }
     ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
   } else {
+    // fetch jmethodID (if any) from the existing cache
     id = jmeths[idnum+1];
-    to_dealloc_jmeths = new_jmeths; // using the old jmeths, deallocate the new one
+    *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
   }
   if (id == NULL) {
+    // No matching jmethodID in the existing cache or we have a new
+    // cache or we just grew the cache. This cache write is done here
+    // by the first thread to win the foot race because a jmethodID
+    // needs to be unique once it is generally available.
     id = new_id;
-    jmeths[idnum+1] = id;  // install the new method ID
+
+    // The jmethodID cache can be read while unlocked so we have to
+    // make sure the new jmethodID is complete before installing it
+    // in the cache.
+    OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
   } else {
-    to_dealloc_id = new_id; // the new id wasn't used, mark it for deallocation
-  }
-
-  // Free up unneeded or no longer needed resources
-  FreeHeap(to_dealloc_jmeths);
-  if (to_dealloc_id != NULL) {
-    JNIHandles::destroy_jmethod_id(to_dealloc_id);
+    *to_dealloc_id_p = new_id; // save new id for later delete
   }
   return id;
 }
 
 
+// Common code to get the jmethodID cache length and the jmethodID
+// value at index idnum if there is one.
+//
+void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
+       size_t idnum, size_t *length_p, jmethodID* id_p) {
+  assert(cache != NULL, "sanity check");
+  assert(length_p != NULL, "sanity check");
+  assert(id_p != NULL, "sanity check");
+
+  // cache size is stored in element[0], other elements offset by one
+  *length_p = (size_t)cache[0];
+  if (*length_p <= idnum) {  // cache is too short
+    *id_p = NULL;
+  } else {
+    *id_p = cache[idnum+1];  // fetch jmethodID (if any)
+  }
+}
+
+
 // Lookup a jmethodID, NULL if not found.  Do no blocking, no allocations, no handles
 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
   size_t idnum = (size_t)method->method_idnum();
   jmethodID* jmeths = methods_jmethod_ids_acquire();
   size_t length;                                // length assigned as debugging crumb
   jmethodID id = NULL;
-  if (jmeths != NULL &&                         // If there is a jmethodID array,
+  if (jmeths != NULL &&                         // If there is a cache
       (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
     id = jmeths[idnum+1];                       // Look up the id (may be NULL)
   }
@@ -1074,18 +1167,35 @@
 // Cache an itable index
 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
   int* indices = methods_cached_itable_indices_acquire();
-  if (indices == NULL ||                         // If there is no index array,
-      ((size_t)indices[0]) <= idnum) {           // or if it is too short
-    // Lock before we allocate the array so we don't leak
+  int* to_dealloc_indices = NULL;
+
+  // We use a double-check locking idiom here because this cache is
+  // performance sensitive. In the normal system, this cache only
+  // transitions from NULL to non-NULL which is safe because we use
+  // release_set_methods_cached_itable_indices() to advertise the
+  // new cache. A partially constructed cache should never be seen
+  // by a racing thread. Cache reads and writes proceed without a
+  // lock, but creation of the cache itself requires no leaks so a
+  // lock is generally acquired in that case.
+  //
+  // If the RedefineClasses() API has been used, then this cache can
+  // grow and we'll have transitions from non-NULL to bigger non-NULL.
+  // Cache creation requires no leaks and we require safety between all
+  // cache accesses and freeing of the old cache so a lock is generally
+  // acquired when the RedefineClasses() API has been used.
+
+  if (indices == NULL || idnum_can_increment()) {
+    // we need a cache or the cache can grow
     MutexLocker ml(JNICachedItableIndex_lock);
-    // Retry lookup after we got the lock
+    // reacquire the cache to see if another thread already did the work
     indices = methods_cached_itable_indices_acquire();
     size_t length = 0;
-    // array length stored in first element, other elements offset by one
+    // cache size is stored in element[0], other elements offset by one
     if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
       size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
       int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
-      // Copy the existing entries, if any
+      new_indices[0] = (int)size;
+      // copy any existing entries
       size_t i;
       for (i = 0; i < length; i++) {
         new_indices[i+1] = indices[i+1];
@@ -1095,15 +1205,32 @@
         new_indices[i+1] = -1;
       }
       if (indices != NULL) {
-        FreeHeap(indices);  // delete any old indices
+        // We have an old cache to delete so save it for after we
+        // drop the lock.
+        to_dealloc_indices = indices;
       }
       release_set_methods_cached_itable_indices(indices = new_indices);
     }
+
+    if (idnum_can_increment()) {
+      // this cache can grow so we have to write to it safely
+      indices[idnum+1] = index;
+    }
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
-  // This is a cache, if there is a race to set it, it doesn't matter
-  indices[idnum+1] = index;
+
+  if (!idnum_can_increment()) {
+    // The cache cannot grow and this JNI itable index value does not
+    // have to be unique like a jmethodID. If there is a race to set it,
+    // it doesn't matter.
+    indices[idnum+1] = index;
+  }
+
+  if (to_dealloc_indices != NULL) {
+    // we allocated a new cache so free the old one
+    FreeHeap(to_dealloc_indices);
+  }
 }
 
 
@@ -2299,6 +2426,11 @@
 
 // Add an information node that contains weak references to the
 // interesting parts of the previous version of the_class.
+// This is also where we clean out any unused weak references.
+// Note that while we delete nodes from the _previous_versions
+// array, we never delete the array itself until the klass is
+// unloaded. The has_been_redefined() query depends on that fact.
+//
 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
        BitMap* emcp_methods, int emcp_method_count) {
   assert(Thread::current()->is_VM_thread(),
--- a/src/share/vm/oops/instanceKlass.hpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/oops/instanceKlass.hpp	Fri Sep 25 12:17:06 2009 -0700
@@ -217,6 +217,7 @@
   bool            _is_marked_dependent;  // used for marking during flushing and deoptimization
   bool            _rewritten;            // methods rewritten.
   bool            _has_nonstatic_fields; // for sizing with UseCompressedOops
+  bool            _should_verify_class;  // allow caching of preverification
   u2              _minor_version;        // minor version number of class file
   u2              _major_version;        // major version number of class file
   ClassState      _init_state;           // state of class
@@ -362,6 +363,10 @@
   int  get_init_state()                    { return _init_state; } // Useful for debugging
   bool is_rewritten() const                { return _rewritten; }
 
+  // defineClass specified verification
+  bool should_verify_class() const         { return _should_verify_class; }
+  void set_should_verify_class(bool value) { _should_verify_class = value; }
+
   // marking
   bool is_marked_dependent() const         { return _is_marked_dependent; }
   void set_is_marked_dependent(bool value) { _is_marked_dependent = value; }
@@ -460,6 +465,10 @@
   // RedefineClasses() support for previous versions:
   void add_previous_version(instanceKlassHandle ikh, BitMap *emcp_methods,
          int emcp_method_count);
+  // If the _previous_versions array is non-NULL, then this klass
+  // has been redefined at least once even if we aren't currently
+  // tracking a previous version.
+  bool has_been_redefined() const { return _previous_versions != NULL; }
   bool has_previous_version() const;
   void init_previous_versions() {
     _previous_versions = NULL;
@@ -501,9 +510,14 @@
   void set_bootstrap_method(oop mh)                   { oop_store(&_bootstrap_method, mh); }
 
   // jmethodID support
-  static jmethodID get_jmethod_id(instanceKlassHandle ik_h, size_t idnum,
-                                  jmethodID new_id, jmethodID* new_jmeths);
-  static jmethodID jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h);
+  static jmethodID get_jmethod_id(instanceKlassHandle ik_h,
+                     methodHandle method_h);
+  static jmethodID get_jmethod_id_fetch_or_update(instanceKlassHandle ik_h,
+                     size_t idnum, jmethodID new_id, jmethodID* new_jmeths,
+                     jmethodID* to_dealloc_id_p,
+                     jmethodID** to_dealloc_jmeths_p);
+  static void get_jmethod_id_length_value(jmethodID* cache, size_t idnum,
+                size_t *length_p, jmethodID* id_p);
   jmethodID jmethod_id_or_null(methodOop method);
 
   // cached itable index support
@@ -749,6 +763,11 @@
   void set_init_thread(Thread *thread)  { _init_thread = thread; }
 
   u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
+  // The RedefineClasses() API can cause new method idnums to be needed
+  // which will cause the caches to grow. Safety requires different
+  // cache management logic if the caches can grow instead of just
+  // going from NULL to non-NULL.
+  bool idnum_can_increment() const      { return has_been_redefined(); }
   jmethodID* methods_jmethod_ids_acquire() const
          { return (jmethodID*)OrderAccess::load_ptr_acquire(&_methods_jmethod_ids); }
   void release_set_methods_jmethod_ids(jmethodID* jmeths)
--- a/src/share/vm/oops/methodOop.hpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/oops/methodOop.hpp	Fri Sep 25 12:17:06 2009 -0700
@@ -555,7 +555,7 @@
 
   // Get this method's jmethodID -- allocate if it doesn't exist
   jmethodID jmethod_id()                            { methodHandle this_h(this);
-                                                      return instanceKlass::jmethod_id_for_impl(method_holder(), this_h); }
+                                                      return instanceKlass::get_jmethod_id(method_holder(), this_h); }
 
   // Lookup the jmethodID for this method.  Return NULL if not found.
   // NOTE that this function can be called from a signal handler
--- a/src/share/vm/opto/escape.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/opto/escape.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -439,6 +439,11 @@
   Node *base = addp->in(AddPNode::Base)->uncast();
   if (base->is_top()) { // The AddP case #3 and #6.
     base = addp->in(AddPNode::Address)->uncast();
+    while (base->is_AddP()) {
+      // Case #6 (unsafe access) may have several chained AddP nodes.
+      assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
+      base = base->in(AddPNode::Address)->uncast();
+    }
     assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
            base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
            (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
--- a/src/share/vm/opto/graphKit.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/opto/graphKit.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -622,11 +622,13 @@
 
 //---------------------------PreserveReexecuteState----------------------------
 PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
+  assert(!kit->stopped(), "must call stopped() before");
   _kit    =    kit;
   _sp     =    kit->sp();
   _reexecute = kit->jvms()->_reexecute;
 }
 PreserveReexecuteState::~PreserveReexecuteState() {
+  if (_kit->stopped()) return;
   _kit->jvms()->_reexecute = _reexecute;
   _kit->set_sp(_sp);
 }
@@ -1086,7 +1088,7 @@
     alen = _gvn.transform( new (C, 3) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
   } else {
     alen = alloc->Ideal_length();
-    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_aryptr(), &_gvn);
+    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
     if (ccast != alen) {
       alen = _gvn.transform(ccast);
     }
@@ -1123,8 +1125,8 @@
     case T_OBJECT : {
       const Type *t = _gvn.type( value );
 
-      const TypeInstPtr* tp = t->isa_instptr();
-      if (tp != NULL && !tp->klass()->is_loaded()
+      const TypeOopPtr* tp = t->isa_oopptr();
+      if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
           // Only for do_null_check, not any of its siblings:
           && !assert_null && null_control == NULL) {
         // Usually, any field access or invocation on an unloaded oop type
--- a/src/share/vm/opto/library_call.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/opto/library_call.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -3903,19 +3903,10 @@
     guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
   }
 
-  // Cast to Object for arraycopy.
-  // We can't use the original CheckCastPP since it should be moved
-  // after the arraycopy to prevent stores flowing above it.
-  Node* new_obj = new(C, 2) CheckCastPPNode(alloc_obj->in(0), raw_obj,
-                                            TypeInstPtr::NOTNULL);
-  new_obj = _gvn.transform(new_obj);
-  // Substitute in the locally valid dest_oop.
-  replace_in_map(alloc_obj, new_obj);
-
   // Copy the fastest available way.
   // TODO: generate fields copies for small objects instead.
   Node* src  = obj;
-  Node* dest = new_obj;
+  Node* dest = alloc_obj;
   Node* size = _gvn.transform(obj_size);
 
   // Exclude the header but include array length to copy by 8 bytes words.
@@ -3961,7 +3952,7 @@
     int raw_adr_idx = Compile::AliasIdxRaw;
     post_barrier(control(),
                  memory(raw_adr_type),
-                 new_obj,
+                 alloc_obj,
                  no_particular_field,
                  raw_adr_idx,
                  no_particular_value,
@@ -3969,16 +3960,8 @@
                  false);
   }
 
-  // Move the original CheckCastPP after arraycopy.
-  _gvn.hash_delete(alloc_obj);
-  alloc_obj->set_req(0, control());
-  // Replace raw memory edge with new CheckCastPP to have a live oop
-  // at safepoints instead of raw value.
-  assert(new_obj->is_CheckCastPP() && new_obj->in(1) == alloc_obj->in(1), "sanity");
-  alloc_obj->set_req(1, new_obj);    // cast to the original type
-  _gvn.hash_find_insert(alloc_obj);  // put back into GVN table
-  // Restore in the locally valid dest_oop.
-  replace_in_map(new_obj, alloc_obj);
+  // Do not let reads from the cloned object float above the arraycopy.
+  insert_mem_bar(Op_MemBarCPUOrder);
 }
 
 //------------------------inline_native_clone----------------------------
@@ -4448,15 +4431,6 @@
     assert(init->is_complete(), "we just did this");
     assert(dest->is_CheckCastPP(), "sanity");
     assert(dest->in(0)->in(0) == init, "dest pinned");
-
-    // Cast to Object for arraycopy.
-    // We can't use the original CheckCastPP since it should be moved
-    // after the arraycopy to prevent stores flowing above it.
-    Node* new_obj = new(C, 2) CheckCastPPNode(dest->in(0), dest->in(1),
-                                              TypeInstPtr::NOTNULL);
-    dest = _gvn.transform(new_obj);
-    // Substitute in the locally valid dest_oop.
-    replace_in_map(original_dest, dest);
     adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
     // From this point on, every exit path is responsible for
     // initializing any non-copied parts of the object to zero.
@@ -4786,18 +4760,6 @@
   set_i_o(     _gvn.transform(result_i_o)    );
   set_memory(  _gvn.transform(result_memory), adr_type );
 
-  if (dest != original_dest) {
-    // Pin the "finished" array node after the arraycopy/zeroing operations.
-    _gvn.hash_delete(original_dest);
-    original_dest->set_req(0, control());
-    // Replace raw memory edge with new CheckCastPP to have a live oop
-    // at safepoints instead of raw value.
-    assert(dest->is_CheckCastPP() && dest->in(1) == original_dest->in(1), "sanity");
-    original_dest->set_req(1, dest);       // cast to the original type
-    _gvn.hash_find_insert(original_dest);  // put back into GVN table
-    // Restore in the locally valid dest_oop.
-    replace_in_map(dest, original_dest);
-  }
   // The memory edges above are precise in order to model effects around
   // array copies accurately to allow value numbering of field loads around
   // arraycopy.  Such field loads, both before and after, are common in Java
@@ -4808,7 +4770,9 @@
   // The next memory barrier is added to avoid it. If the arraycopy can be
   // optimized away (which it can, sometimes) then we can manually remove
   // the membar also.
-  if (InsertMemBarAfterArraycopy)
+  //
+  // Do not let reads from the cloned object float above the arraycopy.
+  if (InsertMemBarAfterArraycopy || alloc != NULL)
     insert_mem_bar(Op_MemBarCPUOrder);
 }
 
--- a/src/share/vm/opto/superword.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/opto/superword.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -990,8 +990,8 @@
 // (5) We know there is no dependence cycle, so there is no other case;
 // (6) Finally, all memory ops in another single pack should be moved in the same direction.
 //
-// To schedule a load pack: the memory edge of every loads in the pack must be
-// the same as the memory edge of the last executed load in the pack
+// To schedule a load pack, we use the memory state of either the first or the last load in
+// the pack, based on the dependence constraint.
 void SuperWord::co_locate_pack(Node_List* pk) {
   if (pk->at(0)->is_Store()) {
     MemNode* first     = executed_first(pk)->as_Mem();
@@ -1076,15 +1076,32 @@
       current = my_mem->as_Mem();
     } // end while
   } else if (pk->at(0)->is_Load()) { //load
-    // all use the memory state that the last executed load uses
-    LoadNode* last_load  = executed_last(pk)->as_Load();
-    Node* last_mem       = last_load->in(MemNode::Memory);
-    _igvn.hash_delete(last_mem);
-    // Give each load same memory state as last
+    // all loads in the pack should have the same memory state. By default,
+    // we use the memory state of the last load. However, if any load could
+    // not be moved down due to the dependence constraint, we use the memory
+    // state of the first load.
+    Node* last_mem  = executed_last(pk)->in(MemNode::Memory);
+    Node* first_mem = executed_first(pk)->in(MemNode::Memory);
+    bool schedule_last = true;
+    for (uint i = 0; i < pk->size(); i++) {
+      Node* ld = pk->at(i);
+      for (Node* current = last_mem; current != ld->in(MemNode::Memory);
+           current=current->in(MemNode::Memory)) {
+        assert(current != first_mem, "corrupted memory graph");
+        if (current->is_Mem() && !independent(current, ld)) {
+          schedule_last = false; // a later store depends on this load
+          break;
+        }
+      }
+    }
+
+    Node* mem_input = schedule_last ? last_mem : first_mem;
+    _igvn.hash_delete(mem_input);
+    // Give each load the same memory state
     for (uint i = 0; i < pk->size(); i++) {
       LoadNode* ld = pk->at(i)->as_Load();
       _igvn.hash_delete(ld);
-      ld->set_req(MemNode::Memory, last_mem);
+      ld->set_req(MemNode::Memory, mem_input);
       _igvn._worklist.push(ld);
     }
   }
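
The hunk above replaces the unconditional "use the last executed load's memory state" rule with a dependence-aware choice between first_mem and last_mem. The sketch below restates just that decision over a toy memory chain; ToyMem and both function names are invented for illustration, and it assumes, as the assert in the real code does, that every pack member's memory input lies on the chain reachable from last_mem.

    #include <vector>

    struct ToyMem {
      ToyMem* prev;            // older memory state (the MemNode::Memory edge)
      bool    is_store;        // only memory-writing nodes matter here
      bool    depends_on_pack; // a store that is not independent of a pack load
    };

    // Walk from last_mem back (exclusive) to stop_at and report whether any
    // intervening store conflicts with the pack.
    static bool conflicting_store_between(ToyMem* last_mem, ToyMem* stop_at) {
      for (ToyMem* cur = last_mem; cur != stop_at; cur = cur->prev) {
        if (cur->is_store && cur->depends_on_pack) return true;
      }
      return false;
    }

    // Memory state every load in the pack should share: last_mem by default,
    // first_mem if moving some load down past a dependent store would be illegal.
    static ToyMem* pick_pack_memory(ToyMem* first_mem, ToyMem* last_mem,
                                    const std::vector<ToyMem*>& pack_mem_inputs) {
      for (ToyMem* own_mem : pack_mem_inputs) {
        if (conflicting_store_between(last_mem, own_mem)) return first_mem;
      }
      return last_mem;
    }

    int main() {
      // Chain: first_mem <- independent store <- last_mem; no conflict, so the
      // pack keeps the last load's memory state.
      ToyMem first_mem = { nullptr,    false, false };
      ToyMem store     = { &first_mem, true,  false };
      ToyMem last_mem  = { &store,     false, false };
      std::vector<ToyMem*> pack = { &first_mem, &last_mem };
      return pick_pack_memory(&first_mem, &last_mem, pack) == &last_mem ? 0 : 1;
    }
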
--- a/src/share/vm/opto/type.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/opto/type.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -2236,12 +2236,12 @@
 
 //------------------------------make-------------------------------------------
 const TypeOopPtr *TypeOopPtr::make(PTR ptr,
-                                   int offset) {
+                                   int offset, int instance_id) {
   assert(ptr != Constant, "no constant generic pointers");
   ciKlass*  k = ciKlassKlass::make();
   bool      xk = false;
   ciObject* o = NULL;
-  return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, InstanceBot))->hashcons();
+  return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, instance_id))->hashcons();
 }
 
 
@@ -2330,7 +2330,8 @@
 
   case OopPtr: {                 // Meeting to other OopPtrs
     const TypeOopPtr *tp = t->is_oopptr();
-    return make( meet_ptr(tp->ptr()), meet_offset(tp->offset()) );
+    int instance_id = meet_instance_id(tp->instance_id());
+    return make( meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id );
   }
 
   case InstPtr:                  // For these, flip the call around to cut down
@@ -2801,7 +2802,7 @@
 
   case OopPtr: {                // Meeting to OopPtrs
     // Found an OopPtr type vs self-InstPtr type
-    const TypePtr *tp = t->is_oopptr();
+    const TypeOopPtr *tp = t->is_oopptr();
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
     switch (tp->ptr()) {
@@ -2812,8 +2813,10 @@
                   (ptr == Constant ? const_oop() : NULL), offset, instance_id);
     }
     case NotNull:
-    case BotPTR:
-      return TypeOopPtr::make(ptr, offset);
+    case BotPTR: {
+      int instance_id = meet_instance_id(tp->instance_id());
+      return TypeOopPtr::make(ptr, offset, instance_id);
+    }
     default: typerr(t);
     }
   }
@@ -3259,7 +3262,7 @@
 
   case OopPtr: {                // Meeting to OopPtrs
     // Found an OopPtr type vs self-AryPtr type
-    const TypePtr *tp = t->is_oopptr();
+    const TypeOopPtr *tp = t->is_oopptr();
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
     switch (tp->ptr()) {
@@ -3270,8 +3273,10 @@
                   _ary, _klass, _klass_is_exact, offset, instance_id);
     }
     case BotPTR:
-    case NotNull:
-      return TypeOopPtr::make(ptr, offset);
+    case NotNull: {
+      int instance_id = meet_instance_id(tp->instance_id());
+      return TypeOopPtr::make(ptr, offset, instance_id);
+    }
     default: ShouldNotReachHere();
     }
   }
--- a/src/share/vm/opto/type.hpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/opto/type.hpp	Fri Sep 25 12:17:06 2009 -0700
@@ -714,7 +714,7 @@
   static const TypeOopPtr* make_from_constant(ciObject* o);
 
   // Make a generic (unclassed) pointer to an oop.
-  static const TypeOopPtr* make(PTR ptr, int offset);
+  static const TypeOopPtr* make(PTR ptr, int offset, int instance_id = InstanceBot);
 
   ciObject* const_oop()    const { return _const_oop; }
   virtual ciKlass* klass() const { return _klass;     }
--- a/src/share/vm/prims/jni.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/prims/jni.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -299,7 +299,8 @@
     }
   }
   klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader,
-                                                     Handle(), &st, CHECK_NULL);
+                                                     Handle(), &st, true,
+                                                     CHECK_NULL);
 
   if (TraceClassResolution && k != NULL) {
     trace_class_resolution(k);
--- a/src/share/vm/prims/jvm.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/prims/jvm.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -762,7 +762,11 @@
 }
 
 // common code for JVM_DefineClass() and JVM_DefineClassWithSource()
-static jclass jvm_define_class_common(JNIEnv *env, const char *name, jobject loader, const jbyte *buf, jsize len, jobject pd, const char *source, TRAPS) {
+// and JVM_DefineClassWithSourceCond()
+static jclass jvm_define_class_common(JNIEnv *env, const char *name,
+                                      jobject loader, const jbyte *buf,
+                                      jsize len, jobject pd, const char *source,
+                                      jboolean verify, TRAPS) {
   if (source == NULL)  source = "__JVM_DefineClass__";
 
   assert(THREAD->is_Java_thread(), "must be a JavaThread");
@@ -803,6 +807,7 @@
   Handle protection_domain (THREAD, JNIHandles::resolve(pd));
   klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader,
                                                      protection_domain, &st,
+                                                     verify != 0,
                                                      CHECK_NULL);
 
   if (TraceClassResolution && k != NULL) {
@@ -816,16 +821,24 @@
 JVM_ENTRY(jclass, JVM_DefineClass(JNIEnv *env, const char *name, jobject loader, const jbyte *buf, jsize len, jobject pd))
   JVMWrapper2("JVM_DefineClass %s", name);
 
-  return jvm_define_class_common(env, name, loader, buf, len, pd, NULL, THREAD);
+  return jvm_define_class_common(env, name, loader, buf, len, pd, NULL, true, THREAD);
 JVM_END
 
 
 JVM_ENTRY(jclass, JVM_DefineClassWithSource(JNIEnv *env, const char *name, jobject loader, const jbyte *buf, jsize len, jobject pd, const char *source))
   JVMWrapper2("JVM_DefineClassWithSource %s", name);
 
-  return jvm_define_class_common(env, name, loader, buf, len, pd, source, THREAD);
-JVM_END
-
+  return jvm_define_class_common(env, name, loader, buf, len, pd, source, true, THREAD);
+JVM_END
+
+JVM_ENTRY(jclass, JVM_DefineClassWithSourceCond(JNIEnv *env, const char *name,
+                                                jobject loader, const jbyte *buf,
+                                                jsize len, jobject pd,
+                                                const char *source, jboolean verify))
+  JVMWrapper2("JVM_DefineClassWithSourceCond %s", name);
+
+  return jvm_define_class_common(env, name, loader, buf, len, pd, source, verify, THREAD);
+JVM_END
 
 JVM_ENTRY(jclass, JVM_FindLoadedClass(JNIEnv *env, jobject loader, jstring name))
   JVMWrapper("JVM_FindLoadedClass");
--- a/src/share/vm/prims/jvm.h	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/prims/jvm.h	Fri Sep 25 12:17:06 2009 -0700
@@ -417,6 +417,17 @@
                           const jbyte *buf, jsize len, jobject pd,
                           const char *source);
 
+/* Define a class with a source, with conditional verification (added HSX 14).
+ * -Xverify:all will verify anyway, -Xverify:none will not verify, and
+ * -Xverify:remote (the default) will obey this flag,
+ * i.e. true = should_verify_class.
+ */
+JNIEXPORT jclass JNICALL
+JVM_DefineClassWithSourceCond(JNIEnv *env, const char *name,
+                              jobject loader, const jbyte *buf,
+                              jsize len, jobject pd, const char *source,
+                              jboolean verify);
+
 /* Define a class with a source (MLVM) */
 JNIEXPORT jclass JNICALL
 JVM_DefineClassWithCP(JNIEnv *env, const char *name, jobject loader,
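
This changeset adds no caller for the new export, so the following is a hypothetical call-site sketch only: the helper name, class name and source tag are invented, and jni.h/jvm.h are assumed to be on the include path. It shows what the extra jboolean adds relative to JVM_DefineClassWithSource.

    #include "jni.h"
    #include "jvm.h"   // declares JVM_DefineClassWithSourceCond (this changeset)

    // Hypothetical helper: define a class, letting the caller decide whether the
    // bytecodes should be verified. Under -Xverify:remote (the default) the flag
    // is honored; -Xverify:all still verifies and -Xverify:none never does.
    extern "C" jclass DefineClassMaybeUnverified(JNIEnv* env, jobject loader,
                                                 jobject pd, const jbyte* buf,
                                                 jsize len, jboolean verify) {
      return JVM_DefineClassWithSourceCond(env,
                                           "com/example/Generated",  // illustrative name
                                           loader, buf, len, pd,
                                           "__Example_Source__",     // illustrative source tag
                                           verify);
    }
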
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -933,7 +933,7 @@
       // description.
       RedefineVerifyMark rvm(&the_class, &scratch_class, state);
       Verifier::verify(
-        scratch_class, Verifier::ThrowException, THREAD);
+        scratch_class, Verifier::ThrowException, true, THREAD);
     }
 
     if (HAS_PENDING_EXCEPTION) {
@@ -959,7 +959,7 @@
       // verify what we have done during constant pool merging
       {
         RedefineVerifyMark rvm(&the_class, &scratch_class, state);
-        Verifier::verify(scratch_class, Verifier::ThrowException, THREAD);
+        Verifier::verify(scratch_class, Verifier::ThrowException, true, THREAD);
       }
 
       if (HAS_PENDING_EXCEPTION) {
--- a/src/share/vm/runtime/safepoint.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/runtime/safepoint.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -769,9 +769,23 @@
   // to grab the Threads_lock which we own here, so a thread cannot be
   // resumed during safepoint synchronization.
 
-  // We check with locking because another thread that has not yet
-  // synchronized may be trying to suspend this one.
-  bool is_suspended = _thread->is_any_suspended_with_lock();
+  // Without taking the lock, we check whether this thread is suspended,
+  // to avoid deadlocking with a third thread that is waiting for this
+  // thread to be suspended. The third thread can notice the safepoint
+  // that we're trying to start at the beginning of its SR_lock->wait()
+  // call. If that happens, then the third thread will block on the
+  // safepoint while still holding the underlying SR_lock. We won't be
+  // able to get the SR_lock and we'll deadlock.
+  //
+  // We don't need to grab the SR_lock here for two reasons:
+  // 1) The suspend flags are both volatile and are set with an
+  //    Atomic::cmpxchg() call so we should see the suspended
+  //    state right away.
+  // 2) We're being called from the safepoint polling loop; if
+  //    we don't see the suspended state on this iteration, then
+  //    we'll come around again.
+  //
+  bool is_suspended = _thread->is_ext_suspended();
   if (is_suspended) {
     roll_forward(_at_safepoint);
     return;
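
The new comment's argument, suspend flags updated with an atomic compare-and-swap, read without the lock, and a polling loop that retries if the update is missed, is the standard lock-free polling pattern. The sketch below restates it with std::atomic stand-ins; it is not HotSpot's SuspendResume code.

    #include <atomic>
    #include <cstdint>

    enum : uint32_t { EXT_SUSPENDED = 1u << 0 };

    struct ToySuspendState {
      std::atomic<uint32_t> flags{0};

      // Suspender side: set the bit with an atomic RMW (stand-in for
      // Atomic::cmpxchg), so the update is visible without any lock.
      void set_ext_suspended() {
        uint32_t old_val = flags.load();
        while (!flags.compare_exchange_weak(old_val, old_val | EXT_SUSPENDED)) {
          // old_val was refreshed on failure; retry.
        }
      }

      // Safepoint-polling side: read without taking SR_lock. A momentarily
      // stale read is harmless because the polling loop comes around again.
      bool is_ext_suspended() const {
        return (flags.load() & EXT_SUSPENDED) != 0;
      }
    };

    int main() {
      ToySuspendState s;
      s.set_ext_suspended();
      return s.is_ext_suspended() ? 0 : 1;
    }
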
--- a/src/share/vm/runtime/thread.cpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/runtime/thread.cpp	Fri Sep 25 12:17:06 2009 -0700
@@ -1942,7 +1942,7 @@
 
   MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
 
-  assert(!this->is_any_suspended(),
+  assert(!this->is_ext_suspended(),
     "a thread trying to self-suspend should not already be suspended");
 
   if (this->is_suspend_equivalent()) {
--- a/src/share/vm/runtime/thread.hpp	Tue Sep 22 14:06:10 2009 -0700
+++ b/src/share/vm/runtime/thread.hpp	Fri Sep 25 12:17:06 2009 -0700
@@ -967,11 +967,6 @@
     return (_suspend_flags & _ext_suspended) != 0;
   }
 
-  // legacy method that checked for either external suspension or vm suspension
-  bool is_any_suspended() const {
-    return is_ext_suspended();
-  }
-
   bool is_external_suspend_with_lock() const {
     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
     return is_external_suspend();
@@ -997,10 +992,6 @@
     return ret;
   }
 
-  bool is_any_suspended_with_lock() const {
-    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
-    return is_any_suspended();
-  }
   // utility methods to see if we are doing some kind of suspension
   bool is_being_ext_suspended() const            {
     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
--- a/test/compiler/6636138/Test1.java	Tue Sep 22 14:06:10 2009 -0700
+++ b/test/compiler/6636138/Test1.java	Fri Sep 25 12:17:06 2009 -0700
@@ -45,7 +45,7 @@
         for (int i = 0; i < src.length; i++) {
             if (src[i] != ref[i]) {
                 System.out.println("Error: src and ref don't match at " + i);
-                System.exit(-1);
+                System.exit(97);
             }
         }
     }
--- a/test/compiler/6636138/Test2.java	Tue Sep 22 14:06:10 2009 -0700
+++ b/test/compiler/6636138/Test2.java	Fri Sep 25 12:17:06 2009 -0700
@@ -51,7 +51,7 @@
             int value = (i-1 + src.length)%src.length; // correct value after shifting
                 if (src[i] != value) {
                     System.out.println("Error: src["+i+"] should be "+ value + " instead of " + src[i]);
-                    System.exit(-1);
+                    System.exit(97);
                 }
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6855215/Test6855215.java	Fri Sep 25 12:17:06 2009 -0700
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6855215
+ * @summary Calculation error (NaN) after about 1500 calculations
+ *
+ * @run main/othervm -Xbatch -XX:UseSSE=0 Test6855215
+ */
+
+public class Test6855215 {
+    private double m;
+    private double b;
+
+    public static double log10(double x) {
+        return Math.log(x) / Math.log(10);
+    }
+
+    void calcMapping(double xmin, double xmax, double ymin, double ymax) {
+        m = (ymax - ymin) / (log10(xmax) - log10(xmin));
+        b = (log10(xmin) * ymax - log10(xmax) * ymin);
+    }
+
+    public static void main(String[] args) {
+        Test6855215 c = new Test6855215();
+        for (int i = 0; i < 30000; i++) {
+            c.calcMapping(91, 121, 177, 34);
+            if (c.m != c.m) {
+                throw new InternalError();
+            }
+        }
+    }
+}
--- a/test/compiler/6865031/Test.java	Tue Sep 22 14:06:10 2009 -0700
+++ b/test/compiler/6865031/Test.java	Fri Sep 25 12:17:06 2009 -0700
@@ -26,7 +26,7 @@
  * @test
  * @bug 6865031
  * @summary Application gives bad result (throws bad exception) with compressed oops
- * @run main/othervm -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:-LoopUnswitching -XX:CompileCommand=inline,AbstractMemoryEfficientList.equals Test hello goodbye
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:-LoopUnswitching -XX:CompileCommand=inline,AbstractMemoryEfficientList.equals Test hello goodbye
  */
 
 import java.lang.ref.ReferenceQueue;