changeset 2167:aa4b04b68652

Merge
author never
date Fri, 21 Jan 2011 13:03:13 -0800
parents a7367756024b 403dc4c1d7f5
children e4fee0bdaa85
files src/share/vm/c1/c1_GraphBuilder.cpp src/share/vm/c1/c1_LIRGenerator.cpp
diffstat 282 files changed, 6775 insertions(+), 4521 deletions(-)
--- a/.hgtags	Fri Jan 21 13:01:02 2011 -0800
+++ b/.hgtags	Fri Jan 21 13:03:13 2011 -0800
@@ -134,4 +134,11 @@
 5484e7c53fa7da5e869902437ee08a9ae10c1c69 jdk7-b119
 f5603a6e50422046ebc0d2f1671d55cb8f1bf1e9 jdk7-b120
 3f3653ab7af8dc1ddb9fa75dad56bf94f89e81a8 jdk7-b121
+3a548dc9cb456110ca8fc1514441a8c3bda0014d jdk7-b122
 5484e7c53fa7da5e869902437ee08a9ae10c1c69 hs20-b03
+9669f9b284108a9ee0a0ccbe215c37a130c9dcf5 jdk7-b123
+9669f9b284108a9ee0a0ccbe215c37a130c9dcf5 hs20-b04
+0a8e0d4345b37b71ec49dda08ee03b68c4f1b592 jdk7-b124
+0a8e0d4345b37b71ec49dda08ee03b68c4f1b592 hs20-b05
+e24ab3fa6aafad3efabbe7dba9918c5f461a20b1 jdk7-b125
+e24ab3fa6aafad3efabbe7dba9918c5f461a20b1 hs20-b06
--- a/agent/src/os/linux/libproc_impl.c	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/os/linux/libproc_impl.c	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/os/linux/ps_core.c	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/os/linux/ps_core.c	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/os/linux/ps_proc.c	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/os/linux/ps_proc.c	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/os/linux/symtab.c	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/os/linux/symtab.c	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/os/linux/symtab.h	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/os/linux/symtab.h	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,15 +99,8 @@
     long typeEntrySizeOffset;
     long typeEntryArrayStride;
 
-    typeEntryTypeNameOffset       = getLongValueFromProcess("gHotSpotVMTypeEntryTypeNameOffset");
-    typeEntrySuperclassNameOffset = getLongValueFromProcess("gHotSpotVMTypeEntrySuperclassNameOffset");
-    typeEntryIsOopTypeOffset      = getLongValueFromProcess("gHotSpotVMTypeEntryIsOopTypeOffset");
-    typeEntryIsIntegerTypeOffset  = getLongValueFromProcess("gHotSpotVMTypeEntryIsIntegerTypeOffset");
-    typeEntryIsUnsignedOffset     = getLongValueFromProcess("gHotSpotVMTypeEntryIsUnsignedOffset");
-    typeEntrySizeOffset           = getLongValueFromProcess("gHotSpotVMTypeEntrySizeOffset");
-    typeEntryArrayStride          = getLongValueFromProcess("gHotSpotVMTypeEntryArrayStride");
-
-    // Fetch the address of the VMTypeEntry*
+    // Fetch the address of the VMTypeEntry*. We get this symbol first
+    // and try to use it to make sure that symbol lookup is working.
     Address entryAddr = lookupInProcess("gHotSpotVMTypes");
     //    System.err.println("gHotSpotVMTypes address = " + entryAddr);
     // Dereference this once to get the pointer to the first VMTypeEntry
@@ -118,6 +111,14 @@
       throw new RuntimeException("gHotSpotVMTypes was not initialized properly in the remote process; can not continue");
     }
 
+    typeEntryTypeNameOffset       = getLongValueFromProcess("gHotSpotVMTypeEntryTypeNameOffset");
+    typeEntrySuperclassNameOffset = getLongValueFromProcess("gHotSpotVMTypeEntrySuperclassNameOffset");
+    typeEntryIsOopTypeOffset      = getLongValueFromProcess("gHotSpotVMTypeEntryIsOopTypeOffset");
+    typeEntryIsIntegerTypeOffset  = getLongValueFromProcess("gHotSpotVMTypeEntryIsIntegerTypeOffset");
+    typeEntryIsUnsignedOffset     = getLongValueFromProcess("gHotSpotVMTypeEntryIsUnsignedOffset");
+    typeEntrySizeOffset           = getLongValueFromProcess("gHotSpotVMTypeEntrySizeOffset");
+    typeEntryArrayStride          = getLongValueFromProcess("gHotSpotVMTypeEntryArrayStride");
+
     // Start iterating down it until we find an entry with no name
     Address typeNameAddr = null;
     do {
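
The hunk above reorders initialization so that gHotSpotVMTypes is resolved and dereferenced before any of the gHotSpotVMTypeEntry* offsets are fetched; a broken symbol lookup then fails immediately instead of after seven apparently successful offset reads. A minimal sketch of the guard pattern, reusing the helper names from the file above:

    // Probe one well-known symbol first to validate that symbol lookup works.
    Address entryAddr = lookupInProcess("gHotSpotVMTypes");
    // Dereference once: VMTypeEntry** -> VMTypeEntry*
    entryAddr = entryAddr.getAddressAt(0);
    if (entryAddr == null) {
      throw new RuntimeException(
          "gHotSpotVMTypes was not initialized properly in the remote process");
    }
    // Only now fetch the per-entry field offsets.
    long typeEntryTypeNameOffset =
        getLongValueFromProcess("gHotSpotVMTypeEntryTypeNameOffset");
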
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/COFFFileParser.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/COFFFileParser.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -122,10 +122,14 @@
       private MemoizedObject[] sectionHeaders;
       private MemoizedObject[] symbols;
 
+      // Init stringTable at decl time since other fields init'ed in the
+      // constructor need the String Table.
       private MemoizedObject stringTable = new MemoizedObject() {
           public Object computeValue() {
+            // the String Table follows the Symbol Table
             int ptr = getPointerToSymbolTable();
             if (ptr == 0) {
+              // no Symbol Table so no String Table
               return new StringTable(0);
             } else {
               return new StringTable(ptr + SYMBOL_SIZE * getNumberOfSymbols());
@@ -140,6 +144,8 @@
         timeDateStamp = readInt();
         pointerToSymbolTable = readInt();
         numberOfSymbols = readInt();
+        // String Table can be accessed at this point because
+        // pointerToSymbolTable and numberOfSymbols fields are set.
         sizeOfOptionalHeader = readShort();
         characteristics = readShort();
 
@@ -222,6 +228,8 @@
         private MemoizedObject windowsSpecificFields;
         private MemoizedObject dataDirectories;
 
+        // We use an offset of 2 because OptionalHeaderStandardFieldsImpl doesn't
+        // include the 'magic' field.
         private static final int STANDARD_FIELDS_OFFSET = 2;
         private static final int PE32_WINDOWS_SPECIFIC_FIELDS_OFFSET = 28;
         private static final int PE32_DATA_DIRECTORIES_OFFSET = 96;
@@ -288,7 +296,7 @@
         private int sizeOfUninitializedData;
         private int addressOfEntryPoint;
         private int baseOfCode;
-        private int baseOfData;
+        private int baseOfData;  // only set in PE32
 
         OptionalHeaderStandardFieldsImpl(int offset,
                                          boolean isPE32Plus) {
@@ -301,7 +309,8 @@
           sizeOfUninitializedData = readInt();
           addressOfEntryPoint = readInt();
           baseOfCode = readInt();
-          if (isPE32Plus) {
+          if (!isPE32Plus) {
+            // only available in PE32
             baseOfData = readInt();
           }
         }
@@ -433,7 +442,10 @@
                 if (dir.getRVA() == 0 || dir.getSize() == 0) {
                   return null;
                 }
-                return new ExportDirectoryTableImpl(rvaToFileOffset(dir.getRVA()), dir.getSize());
+                // ExportDirectoryTableImpl needs both the RVA and the
+                // RVA converted to a file offset.
+                return new
+                    ExportDirectoryTableImpl(dir.getRVA(), dir.getSize());
               }
             };
 
@@ -526,6 +538,7 @@
       }
 
       class ExportDirectoryTableImpl implements ExportDirectoryTable {
+        private int exportDataDirRVA;
         private int offset;
         private int size;
 
@@ -548,8 +561,9 @@
         private MemoizedObject exportOrdinalTable;
         private MemoizedObject exportAddressTable;
 
-        ExportDirectoryTableImpl(int offset, int size) {
-          this.offset = offset;
+        ExportDirectoryTableImpl(int exportDataDirRVA, int size) {
+          this.exportDataDirRVA = exportDataDirRVA;
+          offset = rvaToFileOffset(exportDataDirRVA);
           this.size   = size;
           seek(offset);
           exportFlags = readInt();
@@ -595,6 +609,7 @@
 
           exportOrdinalTable = new MemoizedObject() {
               public Object computeValue() {
+                // number of ordinals is the same as the number of name pointers
                 short[] ordinals = new short[getNumberOfNamePointers()];
                 seek(rvaToFileOffset(getOrdinalTableRVA()));
                 for (int i = 0; i < ordinals.length; i++) {
@@ -608,14 +623,18 @@
               public Object computeValue() {
                 int[] addresses = new int[getNumberOfAddressTableEntries()];
                 seek(rvaToFileOffset(getExportAddressTableRVA()));
-                // Must make two passes to avoid rvaToFileOffset
-                // destroying seek() position
+                // The Export Address Table values are a union of two
+                // possible values:
+                //   Export RVA - The address of the exported symbol when
+                //       loaded into memory, relative to the image base.
+                //       This value doesn't get converted into a file offset.
+                //   Forwarder RVA - The pointer to a null-terminated ASCII
+                //       string in the export section. This value gets
+                //       converted into a file offset because we have to
+                //       fetch the string.
                 for (int i = 0; i < addresses.length; i++) {
                   addresses[i] = readInt();
                 }
-                for (int i = 0; i < addresses.length; i++) {
-                  addresses[i] = rvaToFileOffset(addresses[i]);
-                }
                 return addresses;
               }
             };
@@ -648,11 +667,12 @@
 
         public boolean isExportAddressForwarder(short ordinal) {
           int addr = getExportAddress(ordinal);
-          return ((offset <= addr) && (addr < (offset + size)));
+          return ((exportDataDirRVA <= addr) &&
+              (addr < (exportDataDirRVA + size)));
         }
 
         public String getExportAddressForwarder(short ordinal) {
-          seek(getExportAddress(ordinal));
+          seek(rvaToFileOffset(getExportAddress(ordinal)));
           return readCString();
         }
 
@@ -3371,10 +3391,17 @@
               throw new COFFException(e);
             }
             // Look up in string table
+            // FIXME: this index value is assumed to be in the valid range
             name = getStringTable().get(index);
           } else {
             try {
-              name = new String(tmpName, US_ASCII);
+              int length = 0;
+              // scan up to the first NUL to get the name length
+              for (; length < tmpName.length && tmpName[length] != '\0';) {
+                length++;
+              }
+              // don't include NULL chars in returned name String
+              name = new String(tmpName, 0, length, US_ASCII);
             } catch (UnsupportedEncodingException e) {
               throw new COFFException(e);
             }
@@ -3487,6 +3514,7 @@
                                 tmpName[5] << 16 |
                                 tmpName[6] <<  8 |
                                 tmpName[7]);
+            // FIXME: stringOffset is assumed to be in the valid range
             name = getStringTable().getAtOffset(stringOffset);
           }
 
@@ -3698,12 +3726,13 @@
 
         StringTable(int offset) {
           if (offset == 0) {
+            // no String Table
             strings = new COFFString[0];
             return;
           }
 
           seek(offset);
-          int length = readInt();
+          int length = readInt();  // length includes itself
           byte[] data = new byte[length - 4];
           int numBytesRead = readBytes(data);
           if (numBytesRead != data.length) {
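
The Export Address Table change above hinges on the PE/COFF rule that each entry is a union: either an Export RVA (the exported symbol's address, which must stay an RVA) or a Forwarder RVA (a pointer back into the export section, which must be converted to a file offset before the forwarder string can be read). A sketch of that classification using the names from the class above (rvaToFileOffset, seek, and readCString are the file's own helpers; describeExport is an illustrative helper, not part of the class):

    // True iff the address points back into the export data directory,
    // i.e. the entry is a Forwarder RVA rather than an Export RVA.
    public boolean isExportAddressForwarder(short ordinal) {
      int addr = getExportAddress(ordinal);
      return (exportDataDirRVA <= addr) &&
             (addr < (exportDataDirRVA + size));
    }

    public String describeExport(short ordinal) {  // illustrative only
      int addr = getExportAddress(ordinal);
      if (isExportAddressForwarder(ordinal)) {
        seek(rvaToFileOffset(addr));  // the string lives in the file
        return "forwarded to " + readCString();
      }
      return "code at RVA 0x" + Integer.toHexString(addr);
    }
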
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/DumpExports.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/DumpExports.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,35 +37,48 @@
 
     String filename = args[0];
     COFFFile file   = COFFFileParser.getParser().parse(filename);
-    ExportDirectoryTable exports =
-      file.getHeader().
-        getOptionalHeader().
-          getDataDirectories().
-            getExportDirectoryTable();
+
+    // get common point for both things we want to dump
+    OptionalHeaderDataDirectories dataDirs = file.getHeader().getOptionalHeader().
+        getDataDirectories();
+
+    // dump the header data directory for the Export Table:
+    DataDirectory dir = dataDirs.getExportTable();
+    System.out.println("Export table: RVA = " + dir.getRVA() + "/0x" +
+        Integer.toHexString(dir.getRVA()) + ", size = " + dir.getSize() + "/0x" +
+        Integer.toHexString(dir.getSize()));
+
+    System.out.println(file.getHeader().getNumberOfSections() + " sections in file");
+    for (int i = 1; i <= file.getHeader().getNumberOfSections(); i++) {
+      SectionHeader sec = file.getHeader().getSectionHeader(i);
+      System.out.println("  Section " + i + ":");
+      System.out.println("    Name = '" + sec.getName() + "'");
+      System.out.println("    VirtualSize = " + sec.getSize() + "/0x" +
+          Integer.toHexString(sec.getSize()));
+      System.out.println("    VirtualAddress = " + sec.getVirtualAddress() + "/0x" +
+          Integer.toHexString(sec.getVirtualAddress()));
+      System.out.println("    SizeOfRawData = " + sec.getSizeOfRawData() + "/0x" +
+          Integer.toHexString(sec.getSizeOfRawData()));
+      System.out.println("    PointerToRawData = " + sec.getPointerToRawData() + "/0x" +
+          Integer.toHexString(sec.getPointerToRawData()));
+    }
+
+    ExportDirectoryTable exports = dataDirs.getExportDirectoryTable();
     if (exports == null) {
       System.out.println("No exports found.");
     } else {
-      System.out.println(file.getHeader().getNumberOfSections() + " sections in file");
-      for (int i = 0; i < file.getHeader().getNumberOfSections(); i++) {
-        System.out.println("  Section " + i + ": " + file.getHeader().getSectionHeader(1 + i).getName());
-      }
-
-      DataDirectory dir = file.getHeader().getOptionalHeader().getDataDirectories().getExportTable();
-      System.out.println("Export table: RVA = 0x" + Integer.toHexString(dir.getRVA()) +
-                         ", size = 0x" + Integer.toHexString(dir.getSize()));
-
       System.out.println("DLL name: " + exports.getDLLName());
       System.out.println("Time/date stamp 0x" + Integer.toHexString(exports.getTimeDateStamp()));
       System.out.println("Major version 0x" + Integer.toHexString(exports.getMajorVersion() & 0xFFFF));
       System.out.println("Minor version 0x" + Integer.toHexString(exports.getMinorVersion() & 0xFFFF));
-      System.out.println(exports.getNumberOfNamePointers() + " functions found");
+      System.out.println(exports.getNumberOfNamePointers() + " exports found");
       for (int i = 0; i < exports.getNumberOfNamePointers(); i++) {
-        System.out.println("  0x" +
-                           Integer.toHexString(exports.getExportAddress(exports.getExportOrdinal(i))) +
-                           "  " +
-                           (exports.isExportAddressForwarder(exports.getExportOrdinal(i))  ?
-                            ("Forwarded to " + exports.getExportAddressForwarder(exports.getExportOrdinal(i))) :
-                            exports.getExportName(i)));
+        short ordinal = exports.getExportOrdinal(i);
+        System.out.print("[" + i + "] '" + exports.getExportName(i) + "': [" +
+            ordinal + "] = 0x" + Integer.toHexString(exports.getExportAddress(ordinal)));
+        System.out.println(exports.isExportAddressForwarder(ordinal)
+            ? "  Forwarded to '" + exports.getExportAddressForwarder(ordinal) + "'"
+            : "");
       }
     }
   }
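
With the rewrite above, DumpExports prints the Export Table data directory and all section headers unconditionally, then one line per export. It still takes a single argument, the PE/COFF file to parse (args[0]); assuming the SA classes are on the classpath, an invocation would look like:

    java sun.jvm.hotspot.debugger.win32.coff.DumpExports <path-to-dll>
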
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/TestParser.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/TestParser.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,8 @@
       COFFHeader header = file.getHeader();
       int numSections = header.getNumberOfSections();
       System.out.println(numSections + " sections detected.");
-      for (int i = 0; i < numSections; i++) {
-        SectionHeader secHeader = header.getSectionHeader(1 + i);
+      for (int i = 1; i <= numSections; i++) {
+        SectionHeader secHeader = header.getSectionHeader(i);
         System.out.println(secHeader.getName());
       }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -506,7 +506,6 @@
     throw new DebuggerException("Unimplemented");
   }
 
-  private static String  DTFWHome;
   private static String  imagePath;
   private static String  symbolPath;
   private static boolean useNativeLookup;
@@ -514,81 +513,143 @@
     static {
 
      /*
-      * sawindbg.dll depends on dbgeng.dll which
-      * itself depends on dbghelp.dll. dbgeng.dll and dbghelp.dll.
-      * On systems newer than Windows 2000, these two .dlls are
-      * in the standard system directory so we will find them there.
-      * On Windows 2000 and earlier, these files do not exist.
-      * The user must download Debugging Tools For Windows (DTFW)
-      * and install it in order to use SA.
+      * sawindbg.dll depends on dbgeng.dll which itself depends on
+      * dbghelp.dll. We have to make sure that the dbgeng.dll and
+      * dbghelp.dll that we load are compatible with each other. We
+      * load both of those libraries from the same directory based
+      * on the theory that co-located libraries are compatible.
+      *
+      * On Windows 2000 and earlier, dbgeng.dll and dbghelp.dll were
+      * not included as part of the standard system directory. On
+      * systems newer than Windows 2000, dbgeng.dll and dbghelp.dll
+      * are included in the standard system directory. However, the
+      * versions included in the standard system directory may not
+      * be able to handle symbol information for the newer compilers.
+      *
+      * We search for and explicitly load the libraries using the
+      * following directory search order:
       *
-      * We have to make sure we use the two files from the same directory
-      * in case there are more than one copy on the system because
-      * one version of dbgeng.dll might not be compatible with a
-      * different version of dbghelp.dll.
-      * We first look for them in the directory pointed at by
-      * env. var. DEBUGGINGTOOLSFORWINDOWS, next in the default
-      * installation dir for DTFW, and lastly in the standard
-      * system directory.  We expect that that we will find
-      * them in the standard system directory on all systems
-      * newer than Windows 2000.
+      * - java.home/bin (same as $JAVA_HOME/jre/bin)
+      * - dir named by DEBUGGINGTOOLSFORWINDOWS environment variable
+      * - various "Debugging Tools For Windows" program directories
+      * - the system directory ($SYSROOT/system32)
+      *
+      * If SA is invoked with -Dsun.jvm.hotspot.loadLibrary.DEBUG=1,
+      * then debug messages about library loading are printed to
+      * System.err.
       */
-    String dirName = null;
-    DTFWHome = System.getenv("DEBUGGINGTOOLSFORWINDOWS");
 
-    if (DTFWHome == null) {
-      // See if we have the files in the default location.
-      String sysRoot = System.getenv("SYSTEMROOT");
-      DTFWHome = sysRoot + File.separator +
-          ".." + File.separator + "Program Files" +
-          File.separator + "Debugging Tools For Windows";
-    }
+    String dbgengPath   = null;
+    String dbghelpPath  = null;
+    String sawindbgPath = null;
+    List   searchList   = new ArrayList();
+
+    boolean loadLibraryDEBUG =
+        System.getProperty("sun.jvm.hotspot.loadLibrary.DEBUG") != null;
 
     {
-      String dbghelp = DTFWHome + File.separator + "dbghelp.dll";
-      String dbgeng = DTFWHome + File.separator + "dbgeng.dll";
-      File fhelp = new File(dbghelp);
-      File feng = new File(dbgeng);
-      if (fhelp.exists() && feng.exists()) {
-        // found both, we are happy.
-        // NOTE: The order of loads is important! If we load dbgeng.dll
-        // first, then the dependency - dbghelp.dll - will be loaded
-        // from usual DLL search thereby defeating the purpose!
-        System.load(dbghelp);
-        System.load(dbgeng);
-      } else if (! fhelp.exists() && ! feng.exists()) {
-        // neither exist. We will ignore this dir and assume
-        // they are in the system dir.
-        DTFWHome = null;
-      } else {
-        // one exists but not the other
-        //System.err.println("Error: Both files dbghelp.dll and dbgeng.dll "
-        //                   "must exist in directory " + DTFWHome);
-        throw new UnsatisfiedLinkError("Both files dbghelp.dll and " +
-                                       "dbgeng.dll must exist in " +
-                                       "directory " + DTFWHome);
+      // First place to search is co-located with sawindbg.dll in
+      // $JAVA_HOME/jre/bin (java.home property is set to $JAVA_HOME/jre):
+      searchList.add(System.getProperty("java.home") + File.separator + "bin");
+      sawindbgPath = (String) searchList.get(0) + File.separator +
+          "sawindbg.dll";
+
+      // second place to search is specified by an environment variable:
+      String DTFWHome = System.getenv("DEBUGGINGTOOLSFORWINDOWS");
+      if (DTFWHome != null) {
+        searchList.add(DTFWHome);
       }
-    }
-    if (DTFWHome == null) {
-      // The files better be in the system dir.
-      String sysDir = System.getenv("SYSTEMROOT") +
-          File.separator + "system32";
 
-      File feng = new File(sysDir + File.separator + "dbgeng.dll");
-      if (!feng.exists()) {
-        throw new UnsatisfiedLinkError("File dbgeng.dll does not exist in " +
-                                        sysDir + ".  Please search microsoft.com " +
-                                       "for Debugging Tools For Windows, and " +
-                                       "either download it to the default " +
-                                       "location, or download it to a custom " +
-                                       "location and set environment variable " +
-                                       "   DEBUGGINGTOOLSFORWINDOWS  "  +
-                                       "to the pathname of that location.");
-      }
+      // The third place to search is the install directory for the
+      // "Debugging Tools For Windows" package; so far there are three
+      // name variations that we know of:
+      String sysRoot = System.getenv("SYSTEMROOT");
+      DTFWHome = sysRoot + File.separator + ".." + File.separator +
+          "Program Files" + File.separator + "Debugging Tools For Windows";
+      searchList.add(DTFWHome);
+      searchList.add(DTFWHome + " (x86)");
+      searchList.add(DTFWHome + " (x64)");
+
+      // The last place to search is the system directory:
+      searchList.add(sysRoot + File.separator + "system32");
     }
 
+    for (int i = 0; i < searchList.size(); i++) {
+      File dir = new File((String) searchList.get(i));
+      if (!dir.exists()) {
+        if (loadLibraryDEBUG) {
+          System.err.println("DEBUG: '" + searchList.get(i) +
+              "': directory does not exist.");
+        }
+        // this search directory doesn't exist so skip it
+        continue;
+      }
+
+      dbgengPath = (String) searchList.get(i) + File.separator + "dbgeng.dll";
+      dbghelpPath = (String) searchList.get(i) + File.separator + "dbghelp.dll";
+
+      File feng = new File(dbgengPath);
+      File fhelp = new File(dbghelpPath);
+      if (feng.exists() && fhelp.exists()) {
+        // both files exist so we have a match
+        break;
+      }
+
+      // At least one of the files does not exist; no warning if both
+      // don't exist. If just one doesn't exist then we don't check
+      // loadLibraryDEBUG because we have a mis-configured system.
+      if (feng.exists()) {
+        System.err.println("WARNING: found '" + dbgengPath +
+            "' but did not find '" + dbghelpPath + "'; ignoring '" +
+            dbgengPath + "'.");
+      } else if (fhelp.exists()) {
+        System.err.println("WARNING: found '" + dbghelpPath +
+            "' but did not find '" + dbgengPath + "'; ignoring '" +
+            dbghelpPath + "'.");
+      } else if (loadLibraryDEBUG) {
+        System.err.println("DEBUG: searched '" + searchList.get(i) +
+          "': dbgeng.dll and dbghelp.dll were not found.");
+      }
+      dbgengPath = null;
+      dbghelpPath = null;
+    }
+
+    if (dbgengPath == null || dbghelpPath == null) {
+      // at least one of the files wasn't found anywhere we searched
+      String mesg = null;
+
+      if (dbgengPath == null && dbghelpPath == null) {
+        mesg = "dbgeng.dll and dbghelp.dll cannot be found. ";
+      } else if (dbgengPath == null) {
+        mesg = "dbgeng.dll cannot be found (dbghelp.dll was found). ";
+      } else {
+        mesg = "dbghelp.dll cannot be found (dbgeng.dll was found). ";
+      }
+      throw new UnsatisfiedLinkError(mesg +
+          "Please search microsoft.com for 'Debugging Tools For Windows', " +
+          "and either download it to the default location, or download it " +
+          "to a custom location and set environment variable " +
+          "'DEBUGGINGTOOLSFORWINDOWS' to the pathname of that location.");
+    }
+
+    // NOTE: The order of loads is important! If we load dbgeng.dll
+    // first, then the dependency - dbghelp.dll - will be loaded
+    // from the usual DLL search path, thereby defeating the purpose!
+    if (loadLibraryDEBUG) {
+      System.err.println("DEBUG: loading '" + dbghelpPath + "'.");
+    }
+    System.load(dbghelpPath);
+    if (loadLibraryDEBUG) {
+      System.err.println("DEBUG: loading '" + dbgengPath + "'.");
+    }
+    System.load(dbgengPath);
+
     // Now, load sawindbg.dll
-    System.loadLibrary("sawindbg");
+    if (loadLibraryDEBUG) {
+      System.err.println("DEBUG: loading '" + sawindbgPath + "'.");
+    }
+    System.load(sawindbgPath);
+
     // where do I find '.exe', '.dll' files?
     imagePath = System.getProperty("sun.jvm.hotspot.debugger.windbg.imagePath");
     if (imagePath == null) {
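
The static initializer above replaces the old two-location check with an ordered search list and a requirement that dbgeng.dll and dbghelp.dll be found side by side before either is loaded. A condensed sketch of the policy (the full search list and the loadLibraryDEBUG logging are elided; names follow the diff):

    List searchList = new ArrayList();
    searchList.add(System.getProperty("java.home") + File.separator + "bin");
    // ... then DEBUGGINGTOOLSFORWINDOWS, the DTFW program dirs, system32 ...

    String dbghelpPath = null;
    String dbgengPath  = null;
    for (int i = 0; i < searchList.size(); i++) {
      File dir   = new File((String) searchList.get(i));
      File fhelp = new File(dir, "dbghelp.dll");
      File feng  = new File(dir, "dbgeng.dll");
      if (fhelp.exists() && feng.exists()) {
        // co-located libraries are assumed to be compatible
        dbghelpPath = fhelp.getPath();
        dbgengPath  = feng.getPath();
        break;
      }
    }
    // Load order matters: dbghelp.dll must come first, or loading
    // dbgeng.dll would pull in whatever dbghelp.dll the normal DLL
    // search finds, defeating the co-location check.
    System.load(dbghelpPath);
    System.load(dbgengPath);
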
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Fri Jan 21 13:03:13 2011 -0800
@@ -30,6 +30,7 @@
 import sun.jvm.hotspot.asm.sparc.*;
 import sun.jvm.hotspot.asm.x86.*;
 import sun.jvm.hotspot.asm.ia64.*;
+import sun.jvm.hotspot.asm.amd64.*;
 import sun.jvm.hotspot.code.*;
 import sun.jvm.hotspot.compiler.*;
 import sun.jvm.hotspot.debugger.*;
@@ -198,6 +199,8 @@
          cpuHelper = new SPARCHelper();
       } else if (cpu.equals("x86")) {
          cpuHelper = new X86Helper();
+      } else if (cpu.equals("amd64")) {
+         cpuHelper = new AMD64Helper();
       } else if (cpu.equals("ia64")) {
          cpuHelper = new IA64Helper();
       } else {
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Fri Jan 21 13:01:02 2011 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/make/hotspot_distro	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/hotspot_distro	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2006, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/hotspot_version	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/hotspot_version	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2006, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,11 +31,11 @@
 #
 
 # Don't put quotes (fail windows build).
-HOTSPOT_VM_COPYRIGHT=Copyright 2010
+HOTSPOT_VM_COPYRIGHT=Copyright 2011
 
 HS_MAJOR_VER=20
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=04
+HS_BUILD_NUMBER=07
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/jprt.gmk	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/jprt.gmk	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/jprt.properties	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/jprt.properties	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/build_vm_def.sh	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/build_vm_def.sh	Fri Jan 21 13:03:13 2011 -0800
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 # If we're cross compiling use that path for nm
-if [ "$ALT_COMPILER_PATH" != "" ]; then 
+if [ "$CROSS_COMPILE_ARCH" != "" ]; then 
 NM=$ALT_COMPILER_PATH/nm
 else
 NM=nm
--- a/make/linux/makefiles/buildtree.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/buildtree.make	Fri Jan 21 13:03:13 2011 -0800
@@ -124,7 +124,7 @@
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
 BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
-        env.sh env.csh .dbxrc test_gamma
+        env.sh env.csh jdkpath.sh .dbxrc test_gamma
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -318,6 +318,13 @@
 	sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \
 	) > $@
 
+jdkpath.sh: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo "JDK=${JAVA_HOME}"; \
+	) > $@	   
+
 .dbxrc:  $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
--- a/make/linux/makefiles/defs.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/defs.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/gcc.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/gcc.make	Fri Jan 21 13:03:13 2011 -0800
@@ -25,7 +25,9 @@
 #------------------------------------------------------------------------
 # CC, CPP & AS
 
-ifdef ALT_COMPILER_PATH
+# When cross-compiling the ALT_COMPILER_PATH points
+# to the cross-compilation toolset
+ifdef CROSS_COMPILE_ARCH
 CPP = $(ALT_COMPILER_PATH)/g++
 CC  = $(ALT_COMPILER_PATH)/gcc
 else
--- a/make/linux/makefiles/jvmti.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/jvmti.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/mapfile-vers-debug	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/mapfile-vers-debug	Fri Jan 21 13:03:13 2011 -0800
@@ -3,7 +3,7 @@
 #
 
 #
-# Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/mapfile-vers-product	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/mapfile-vers-product	Fri Jan 21 13:03:13 2011 -0800
@@ -3,7 +3,7 @@
 #
 
 #
-# Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/product.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/product.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/saproc.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/saproc.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/shark.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/shark.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 # Copyright 2008, 2010 Red Hat, Inc.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
--- a/make/linux/makefiles/sparcWorks.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/sparcWorks.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/vm.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/linux/makefiles/vm.make	Fri Jan 21 13:03:13 2011 -0800
@@ -168,7 +168,9 @@
 
 # Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE.
 define findsrc
-	$(notdir $(shell find $(1) \( -name \*.c -o -name \*.cpp -o -name \*.s \) -a \! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \) ))
+	$(notdir $(shell find $(1)/. ! -name . -prune \
+		-a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \
+		-a ! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \)))
 endef
 
 Src_Files := $(foreach e,$(Src_Dirs),$(call findsrc,$(e)))
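
The new findsrc uses the `find $(1)/. ! -name . -prune` idiom: every entry other than the starting directory itself is pruned, so find never descends into subdirectories and only top-level .c/.cpp/.s files are matched, whereas the previous `find $(1)` recursed into the whole tree. The same idiom appears in the Solaris vm.make and make/windows/create_obj_files.sh below. A rough Java illustration of the behavioral difference (hypothetical snippet, for intuition only):

    // Old behavior: recurse into every subdirectory of dir.
    // New behavior: consider dir's immediate children only, e.g.:
    File[] srcs = new File(dir).listFiles(new FileFilter() {
      public boolean accept(File f) {
        return f.isFile() && f.getName().matches(".*\\.(c|cpp|s)");
      }
    });
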
--- a/make/solaris/makefiles/buildtree.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/solaris/makefiles/buildtree.make	Fri Jan 21 13:03:13 2011 -0800
@@ -117,7 +117,7 @@
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
 BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
-        env.ksh env.csh .dbxrc test_gamma
+        env.ksh env.csh jdkpath.sh .dbxrc test_gamma
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -314,6 +314,13 @@
 	sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \
 	) > $@
 
+jdkpath.sh: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo "JDK=${JAVA_HOME}"; \
+	) > $@	   
+
 .dbxrc:  $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
--- a/make/solaris/makefiles/defs.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/solaris/makefiles/defs.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/fastdebug.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/solaris/makefiles/fastdebug.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/jvmti.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/solaris/makefiles/jvmti.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/optimized.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/solaris/makefiles/optimized.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/product.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/solaris/makefiles/product.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/saproc.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/solaris/makefiles/saproc.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/vm.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/solaris/makefiles/vm.make	Fri Jan 21 13:03:13 2011 -0800
@@ -106,17 +106,17 @@
 # Not sure what the 'designed for' comment is referring to above.
 #   The order may not be too significant anymore, but I have placed this
 #   older libm before libCrun, just to make sure it's found and used first.
-LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc
+LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc -ldemangle
 else
 ifeq ($(COMPILER_REV_NUMERIC), 502)
 # SC6.1 has its own libm.so: specifying anything else provokes a name conflict.
-LIBS += -ldl -lthread -lsocket -lm -lsched -ldoor
+LIBS += -ldl -lthread -lsocket -lm -lsched -ldoor -ldemangle
 else
-LIBS += -ldl -lthread -lsocket $(LIBM) -lsched -ldoor
+LIBS += -ldl -lthread -lsocket $(LIBM) -lsched -ldoor -ldemangle
 endif # 502
 endif # 505
 else
-LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc
+LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle
 endif # sparcWorks
 
 ifeq ("${Platform_arch}", "sparc")
@@ -188,7 +188,9 @@
 
 # Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE.
 define findsrc
-	$(notdir $(shell find $(1) \( -name \*.c -o -name \*.cpp -o -name \*.s \) -a \! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \) ))
+	$(notdir $(shell find $(1)/. ! -name . -prune \
+		-a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \
+		-a ! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \)))
 endef
 
 Src_Files := $(foreach e,$(Src_Dirs),$(call findsrc,$(e)))
--- a/make/windows/build.bat	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/build.bat	Fri Jan 21 13:03:13 2011 -0800
@@ -1,6 +1,6 @@
 @echo off
 REM
-REM Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+REM Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 REM
 REM This code is free software; you can redistribute it and/or modify it
--- a/make/windows/build_vm_def.sh	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/build_vm_def.sh	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,9 @@
 echo "EXPORTS" > vm1.def
 
 AWK="$MKS_HOME/awk.exe"
+if [ ! -e $AWK ]; then
+    AWK="$MKS_HOME/gawk.exe"
+fi
 GREP="$MKS_HOME/grep.exe"
 SORT="$MKS_HOME/sort.exe"
 UNIQ="$MKS_HOME/uniq.exe"
@@ -57,7 +60,7 @@
 LINK_VER="$1"
 fi
 
-if [ "x$LINK_VER" != "x800" -a  "x$LINK_VER" != "x900" ]; then
+if [ "x$LINK_VER" != "x800" -a  "x$LINK_VER" != "x900" -a "x$LINK_VER" != "x1000" ]; then
 $DUMPBIN /symbols *.obj | "$GREP" "??_7.*@@6B@" | "$GREP" -v "type_info" | "$AWK" '{print $7}' | "$SORT" | "$UNIQ" > vm2.def
 else
 # Can't use pipes when calling cl.exe or link.exe from IDE. Using transit file vm3.def
--- a/make/windows/create.bat	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/create.bat	Fri Jan 21 13:03:13 2011 -0800
@@ -36,6 +36,20 @@
 REM Note: Running this batch file from the Windows command shell requires
 REM that "grep" be accessible on the PATH. An MKS install does this.
 REM 
+
+cl 2>NUL >NUL
+if %errorlevel% == 0 goto nexttest
+echo Make sure cl.exe is in your PATH before running this script.
+goto end
+
+:nexttest
+grep -V 2>NUL >NUL
+if %errorlevel% == 0 goto testit
+echo Make sure grep.exe is in your PATH before running this script. Either Cygwin or MKS should work.
+goto end
+
+
+:testit
 cl 2>&1 | grep "IA-64" >NUL
 if %errorlevel% == 0 goto isia64
 cl 2>&1 | grep "AMD64" >NUL
@@ -44,37 +58,40 @@
 set BUILDARCH=i486
 set Platform_arch=x86
 set Platform_arch_model=x86_32
-goto end
+goto done
 :amd64
 set ARCH=x86
 set BUILDARCH=amd64
 set Platform_arch=x86
 set Platform_arch_model=x86_64
-goto end
+goto done
 :isia64
 set ARCH=ia64
 set BUILDARCH=ia64
 set Platform_arch=ia64
 set Platform_arch_model=ia64
-:end
+:done
 
 setlocal
 
 if "%1" == "" goto usage
 
-if not "%4" == "" goto usage
+if not "%2" == "" goto usage
 
-set HotSpotWorkSpace=%1
-set HotSpotBuildSpace=%2
-set HotSpotJDKDist=%3
+REM Set HotSpotWorkSpace to the directory two steps above this script
+for %%i in ("%~dp0..") do ( set HotSpotWorkSpace=%%~dpi)
+set HotSpotBuildRoot=%HotSpotWorkSpace%build
+set HotSpotBuildSpace=%HotSpotBuildRoot%\vs
+set HotSpotJDKDist=%1
+
 
 REM figure out MSC version
 for /F %%i in ('sh %HotSpotWorkSpace%/make/windows/get_msc_ver.sh') do set %%i
 
 echo **************************************************************
-set ProjectFile=vm.vcproj
+set ProjectFile=jvm.vcproj
 if "%MSC_VER%" == "1200" (
-set ProjectFile=vm.dsp
+set ProjectFile=jvm.dsp
 echo Will generate VC6 project {unsupported}
 ) else (
 if "%MSC_VER%" == "1400" (
@@ -83,10 +100,16 @@
 if "%MSC_VER%" == "1500" (
 echo Will generate VC9 {Visual Studio 2008}
 ) else (
+if "%MSC_VER%" == "1600" (
+echo Detected Visual Studio 2010, but
+echo will generate VC9 {Visual Studio 2008}
+echo Use conversion wizard in VS 2010.
+) else (
 echo Will generate VC7 project {Visual Studio 2003 .NET}
 )
 )
 )
+)
 echo                            %ProjectFile%
 echo **************************************************************
 
@@ -118,6 +141,8 @@
 
 :test3
 if not "%HOTSPOTMKSHOME%" == "" goto makedir
+if exist c:\cygwin\bin set HOTSPOTMKSHOME=c:\cygwin\bin
+if not "%HOTSPOTMKSHOME%" == "" goto makedir
 echo Warning: please set variable HOTSPOTMKSHOME to place where 
 echo          your MKS/Cygwin installation is
 echo.
@@ -133,21 +158,24 @@
 REM This is now safe to do.
 :copyfiles
 for /D %%i in (compiler1, compiler2, tiered, core, kernel) do (
-if NOT EXIST %HotSpotBuildSpace%\%%i mkdir %HotSpotBuildSpace%\%%i
-copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\ > NUL
+if NOT EXIST %HotSpotBuildSpace%\%%i\generated mkdir %HotSpotBuildSpace%\%%i\generated
+copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\generated > NUL
 )
 
 REM force regeneration of ProjectFile
 if exist %HotSpotBuildSpace%\%ProjectFile% del %HotSpotBuildSpace%\%ProjectFile%
 
 for /D %%i in (compiler1, compiler2, tiered, core, kernel) do (
-
-echo # Generated file!                                                 >    %HotSpotBuildSpace%\%%i\local.make
+echo -- %%i --
+echo # Generated file!                                                        >    %HotSpotBuildSpace%\%%i\local.make
 echo # Changing a variable below and then deleting %ProjectFile% will cause  >>    %HotSpotBuildSpace%\%%i\local.make
 echo # %ProjectFile% to be regenerated with the new values.  Changing the    >>    %HotSpotBuildSpace%\%%i\local.make
-echo # version requires rerunning create.bat.                         >>    %HotSpotBuildSpace%\%%i\local.make
+echo # version requires rerunning create.bat.                                >>    %HotSpotBuildSpace%\%%i\local.make
 echo.                                      >>    %HotSpotBuildSpace%\%%i\local.make
+echo Variant=%%i			   >>    %HotSpotBuildSpace%\%%i\local.make
+echo WorkSpace=%HotSpotWorkSpace%   	   >>    %HotSpotBuildSpace%\%%i\local.make
 echo HOTSPOTWORKSPACE=%HotSpotWorkSpace%   >>    %HotSpotBuildSpace%\%%i\local.make
+echo HOTSPOTBUILDROOT=%HotSpotBuildRoot%   >>    %HotSpotBuildSpace%\%%i\local.make
 echo HOTSPOTBUILDSPACE=%HotSpotBuildSpace% >>    %HotSpotBuildSpace%\%%i\local.make
 echo HOTSPOTJDKDIST=%HotSpotJDKDist%       >>    %HotSpotBuildSpace%\%%i\local.make
 echo ARCH=%ARCH%                           >>    %HotSpotBuildSpace%\%%i\local.make
@@ -155,42 +183,35 @@
 echo Platform_arch=%Platform_arch%         >>    %HotSpotBuildSpace%\%%i\local.make
 echo Platform_arch_model=%Platform_arch_model% >>    %HotSpotBuildSpace%\%%i\local.make
 
-pushd %HotSpotBuildSpace%\%%i
+for /D %%j in (debug, fastdebug, product) do (
+if NOT EXIST %HotSpotBuildSpace%\%%i\%%j mkdir %HotSpotBuildSpace%\%%i\%%j
+)
+
+pushd %HotSpotBuildSpace%\%%i\generated
 nmake /nologo
 popd
 
 )
 
-pushd %HotSpotBuildSpace%
+pushd %HotSpotBuildRoot%
 
-echo # Generated file!                                                 >    local.make
-echo # Changing a variable below and then deleting %ProjectFile% will cause  >>    local.make
-echo # %ProjectFile% to be regenerated with the new values.  Changing the    >>    local.make
-echo # version requires rerunning create.bat.                         >>    local.make
-echo.                                      >>    local.make
-echo HOTSPOTWORKSPACE=%HotSpotWorkSpace%   >>    local.make
-echo HOTSPOTBUILDSPACE=%HotSpotBuildSpace% >>    local.make
-echo HOTSPOTJDKDIST=%HotSpotJDKDist%       >>    local.make
-echo ARCH=%ARCH%                           >>    local.make
-echo BUILDARCH=%BUILDARCH%                 >>    local.make
-echo Platform_arch=%Platform_arch%         >>    local.make
-echo Platform_arch_model=%Platform_arch_model% >>    local.make
-
-nmake /nologo /F %HotSpotWorkSpace%/make/windows/projectfiles/common/Makefile %HotSpotBuildSpace%/%ProjectFile%
+REM It doesn't matter which variant we use here; "compiler1" is as good as any of the others, since we only need the common variables
+nmake /nologo /F %HotSpotWorkSpace%/make/windows/projectfiles/common/Makefile LOCAL_MAKE=%HotSpotBuildSpace%\compiler1\local.make %HotSpotBuildRoot%/%ProjectFile%
 
 popd
 
 goto end
 
 :usage
-echo Usage: create HotSpotWorkSpace HotSpotBuildSpace HotSpotJDKDist
+echo Usage: create HotSpotJDKDist
 echo.
-echo This is the interactive build setup script (as opposed to the batch
-echo build execution script). It creates HotSpotBuildSpace if necessary,
-echo copies the appropriate files out of HotSpotWorkSpace into it, and
+echo This is the VS build setup script (as opposed to the batch
+echo build execution script). It creates a build directory if necessary,
+echo copies the appropriate files out of the workspace into it, and
 echo builds and runs ProjectCreator in it. This has the side-effect of creating
 echo the %ProjectFile% file in the build space, which is then used in Visual C++.
-echo The HotSpotJDKDist defines place where JVM binaries should be placed.
+echo.
+echo The HotSpotJDKDist defines the JDK that should be used when running the JVM.
 echo The environment variable FORCE_MSC_VER allows overriding MSVC version autodetection.
 echo.
 echo NOTE that it is now NOT safe to modify any of the files in the build
--- a/make/windows/create_obj_files.sh	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/create_obj_files.sh	Fri Jan 21 13:03:13 2011 -0800
@@ -107,8 +107,12 @@
 	"x86_64") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} *x86_32*" ;;
 esac
 
+# Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE.
 function findsrc {
-    $FIND ${1} \( -name \*.c -o -name \*.cpp -o -name \*.s \) -a \! \( -name ${Src_Files_EXCLUDE// / -o -name } \) | sed 's/.*\/\(.*\)/\1/';
+    $FIND ${1}/. ! -name . -prune \
+		-a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \
+		-a \! \( -name ${Src_Files_EXCLUDE// / -o -name } \) \
+		| sed 's/.*\/\(.*\)/\1/';
 }
 
 Src_Files=
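
The rewritten findsrc is deliberately non-recursive: the "${1}/. ! -name . -prune" prefix stops find from descending into subdirectories, so only the sources sitting directly in the given directory are listed. For readers who find the prune idiom opaque, a rough C++17 sketch of the same behavior (the directory name and exclusion entry are illustrative, not taken from the build):

    // Sketch only (assumes C++17 <filesystem>): list *.c/*.cpp/*.s found
    // directly in one directory -- no recursion -- skip excluded names,
    // and print basenames, like the trailing sed in findsrc.
    #include <filesystem>
    #include <iostream>
    #include <set>
    #include <string>

    namespace fs = std::filesystem;

    int main() {
        const std::set<std::string> exclude = {"some_excluded_file.cpp"};  // hypothetical
        for (const auto& entry : fs::directory_iterator("src")) {          // non-recursive
            if (!entry.is_regular_file()) continue;
            const std::string ext  = entry.path().extension().string();
            const std::string name = entry.path().filename().string();
            if (ext != ".c" && ext != ".cpp" && ext != ".s") continue;
            if (exclude.count(name)) continue;
            std::cout << name << '\n';
        }
        return 0;
    }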
--- a/make/windows/get_msc_ver.sh	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/get_msc_ver.sh	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/adlc.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/adlc.make	Fri Jan 21 13:03:13 2011 -0800
@@ -22,7 +22,6 @@
 #  
 #
 
-!include $(WorkSpace)/make/windows/makefiles/compile.make
 
 # Rules for building adlc.exe
 
@@ -46,15 +45,7 @@
 ADLCFLAGS=-q -T -U_LP64
 !endif
 
-CPP_FLAGS=$(CPP_FLAGS) \
-  /D TARGET_OS_FAMILY_windows \
-  /D TARGET_ARCH_$(Platform_arch) \
-  /D TARGET_ARCH_MODEL_$(Platform_arch_model) \
-  /D TARGET_OS_ARCH_windows_$(Platform_arch) \
-  /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model) \
-  /D TARGET_COMPILER_visCPP
-
-CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_WARNINGS /D _CRT_SECURE_NO_DEPRECATE
+ADLC_CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_WARNINGS /D _CRT_SECURE_NO_DEPRECATE
 
 CPP_INCLUDE_DIRS=\
   /I "..\generated" \
@@ -92,10 +83,10 @@
   $(AdlcOutDir)\dfa_$(Platform_arch_model).cpp
 
 {$(WorkSpace)\src\share\vm\adlc}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $<
+        $(CPP) $(ADLC_CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $<
 
 {$(WorkSpace)\src\share\vm\opto}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $<
+        $(CPP) $(ADLC_CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $<
 
 adlc.exe: main.obj adlparse.obj archDesc.obj arena.obj dfa.obj dict2.obj filebuff.obj \
           forms.obj formsopt.obj formssel.obj opcodes.obj output_c.obj output_h.obj
--- a/make/windows/makefiles/compile.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/compile.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -80,6 +80,20 @@
 CPP=ARCH_ERROR
 !endif
 
+CPP_FLAGS=$(CPP_FLAGS) /D "WIN32" /D "_WINDOWS"
+
+# Must specify this for sharedRuntimeTrig.cpp
+CPP_FLAGS=$(CPP_FLAGS) /D "VM_LITTLE_ENDIAN"
+
+# Used for platform dispatching
+CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_FAMILY_windows
+CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_$(Platform_arch)
+CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_MODEL_$(Platform_arch_model)
+CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_windows_$(Platform_arch)
+CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model)
+CPP_FLAGS=$(CPP_FLAGS) /D TARGET_COMPILER_visCPP
+
+
 # MSC_VER is a 4 digit number that tells us what compiler is being used
 #    and is generated when the local.make file is created by build.make
 #    via the script get_msc_ver.sh
@@ -138,7 +152,7 @@
 !endif
 
 # Always add the _STATIC_CPPLIB flag
-STATIC_CPPLIB_OPTION = /D _STATIC_CPPLIB
+STATIC_CPPLIB_OPTION = /D _STATIC_CPPLIB /D _DISABLE_DEPRECATE_STATIC_CPPLIB
 MS_RUNTIME_OPTION = $(MS_RUNTIME_OPTION) $(STATIC_CPPLIB_OPTION)
 CPP_FLAGS=$(CPP_FLAGS) $(MS_RUNTIME_OPTION)
 
--- a/make/windows/makefiles/debug.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/debug.make	Fri Jan 21 13:03:13 2011 -0800
@@ -26,7 +26,6 @@
 HS_FNAME=$(HS_INTERNAL_NAME).dll
 AOUT=$(HS_FNAME)
 SAWINDBG=sawindbg.dll
-LAUNCHER_NAME=hotspot.exe
 GENERATED=../generated
 
 # Allow the user to turn off precompiled headers from the command line.
@@ -34,7 +33,7 @@
 BUILD_PCH_FILE=_build_pch_file.obj
 !endif
 
-default:: $(BUILD_PCH_FILE) $(AOUT) $(LAUNCHER_NAME) checkAndBuildSA
+default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
 
 !include ../local.make
 !include compile.make
@@ -49,8 +48,10 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-$(AOUT): $(Res_Files) $(Obj_Files)
+vm.def: $(Obj_Files)
 	sh $(WorkSpace)/make/windows/build_vm_def.sh
+
+$(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LINK) @<<
   $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
 <<
--- a/make/windows/makefiles/defs.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/defs.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/fastdebug.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/fastdebug.make	Fri Jan 21 13:03:13 2011 -0800
@@ -26,7 +26,6 @@
 HS_FNAME=$(HS_INTERNAL_NAME).dll
 AOUT=$(HS_FNAME)
 SAWINDBG=sawindbg.dll
-LAUNCHER_NAME=hotspot.exe
 GENERATED=../generated
 
 # Allow the user to turn off precompiled headers from the command line.
@@ -34,7 +33,7 @@
 BUILD_PCH_FILE=_build_pch_file.obj
 !endif
 
-default:: $(BUILD_PCH_FILE) $(AOUT) $(LAUNCHER_NAME) checkAndBuildSA
+default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
 
 !include ../local.make
 !include compile.make
@@ -49,8 +48,10 @@
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE
 
-$(AOUT): $(Res_Files) $(Obj_Files)
+vm.def: $(Obj_Files)
 	sh $(WorkSpace)/make/windows/build_vm_def.sh
+
+$(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LINK) @<<
   $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
 <<
--- a/make/windows/makefiles/generated.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/generated.make	Fri Jan 21 13:03:13 2011 -0800
@@ -51,6 +51,7 @@
 
 !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered")
 
+!include $(WorkSpace)/make/windows/makefiles/compile.make
 !include $(WorkSpace)/make/windows/makefiles/adlc.make
 
 !endif
--- a/make/windows/makefiles/launcher.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/launcher.make	Fri Jan 21 13:03:13 2011 -0800
@@ -22,7 +22,8 @@
 #  
 #
 
-LAUNCHER_FLAGS=$(ARCHFLAG) \
+
+LAUNCHER_FLAGS=$(CPP_FLAGS) $(ARCHFLAG) \
 	/D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \
 	/D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \
 	/D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \
@@ -32,9 +33,11 @@
 	/D _CRT_SECURE_NO_DEPRECATE \
 	/D LINK_INTO_LIBJVM \
 	/I $(WorkSpace)\src\os\windows\launcher \
-	/I $(WorkSpace)\src\share\tools\launcher
-
-CPP_FLAGS=$(CPP_FLAGS) $(LAUNCHER_FLAGS)
+	/I $(WorkSpace)\src\share\tools\launcher \
+	/I $(WorkSpace)\src\share\vm\prims \
+	/I $(WorkSpace)\src\share\vm \
+	/I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \
+	/I $(WorkSpace)\src\os\windows\vm
 
 LINK_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console 
 
@@ -46,22 +49,23 @@
 LINK_FLAGS = $(LINK_FLAGS) $(BUFFEROVERFLOWLIB)
 !endif
 
-LAUNCHERDIR = $(GAMMADIR)/src/os/windows/launcher
-LAUNCHERDIR_SHARE = $(GAMMADIR)/src/share/tools/launcher
+LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher
+LAUNCHERDIR_SHARE = $(WorkSpace)/src/share/tools/launcher
 
 OUTDIR = launcher
 
 {$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj:
-	-mkdir $(OUTDIR)
-        $(CPP) $(CPP_FLAGS) /c /Fo$@ $<
+	-mkdir $(OUTDIR) 2>NUL >NUL
+        $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $<
 
 {$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj:
-	-mkdir $(OUTDIR)
-        $(CPP) $(CPP_FLAGS) /c /Fo$@ $<
+	-mkdir $(OUTDIR) 2>NUL >NUL
+        $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $<
 
 $(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h
 
-$(LAUNCHER_NAME): $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj
-	$(LINK) $(LINK_FLAGS) /out:$@ $**
+launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj
+	echo $(JAVA_HOME) > jdkpath.txt  
+	$(LINK) $(LINK_FLAGS) /out:hotspot.exe $**
 
 
--- a/make/windows/makefiles/product.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/product.make	Fri Jan 21 13:03:13 2011 -0800
@@ -25,7 +25,6 @@
 HS_INTERNAL_NAME=jvm
 HS_FNAME=$(HS_INTERNAL_NAME).dll
 AOUT=$(HS_FNAME)
-LAUNCHER_NAME=hotspot.exe
 GENERATED=../generated
 
 # Allow the user to turn off precompiled headers from the command line.
@@ -33,7 +32,7 @@
 BUILD_PCH_FILE=_build_pch_file.obj
 !endif
 
-default:: $(BUILD_PCH_FILE) $(AOUT) $(LAUNCHER_NAME) checkAndBuildSA
+default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
 
 !include ../local.make
 !include compile.make
@@ -59,8 +58,10 @@
   $(LINK_FLAGS) /out:$@ /implib:$*.lib $(Obj_Files) $(Res_Files)
 <<
 !else
-$(AOUT): $(Res_Files) $(Obj_Files)
+vm.def: $(Obj_Files)
 	sh $(WorkSpace)/make/windows/build_vm_def.sh
+
+$(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LINK) @<<
   $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
 <<
--- a/make/windows/makefiles/projectcreator.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/projectcreator.make	Fri Jan 21 13:03:13 2011 -0800
@@ -84,11 +84,12 @@
         -buildBase $(HOTSPOTBUILDSPACE)\%f\%b \
         -startAt src \
         -compiler $(VcVersion) \
-        -projectFileName $(HOTSPOTBUILDSPACE)\$(ProjectFile) \
+        -projectFileName $(HOTSPOTBUILDROOT)\$(ProjectFile) \
         -jdkTargetRoot $(HOTSPOTJDKDIST) \
         -define ALIGN_STACK_FRAMES \
         -define VM_LITTLE_ENDIAN \
         -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
+        -postbuild "" "Building hotspot.exe..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \
         -ignoreFile jsig.c \
         -ignoreFile jvmtiEnvRecommended.cpp \
         -ignoreFile jvmtiEnvStub.cpp \
--- a/make/windows/makefiles/rules.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/rules.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,7 @@
 JAVAC_FLAGS=-g -encoding ascii
 BOOTSTRAP_JAVAC_FLAGS=$(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
 
-ProjectFile=vm.vcproj
+ProjectFile=jvm.vcproj
 
 !if "$(MSC_VER)" == "1200"
 
@@ -63,6 +63,11 @@
 
 VcVersion=VC9
 
+!elseif "$(MSC_VER)" == "1600"
+
+# For compatibility: we don't yet have a ProjectCreator for VC10
+VcVersion=VC9
+
 !else
 
 VcVersion=VC7
--- a/make/windows/makefiles/sanity.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/sanity.make	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/vm.make	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/makefiles/vm.make	Fri Jan 21 13:03:13 2011 -0800
@@ -71,22 +71,11 @@
 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
 
-CPP_FLAGS=$(CPP_FLAGS) /D "WIN32" /D "_WINDOWS" $(CPP_INCLUDE_DIRS)
-
-# Must specify this for sharedRuntimeTrig.cpp
-CPP_FLAGS=$(CPP_FLAGS) /D "VM_LITTLE_ENDIAN"
+CPP_FLAGS=$(CPP_FLAGS) $(CPP_INCLUDE_DIRS)
 
 # Define that so jni.h is on correct side
 CPP_FLAGS=$(CPP_FLAGS) /D "_JNI_IMPLEMENTATION_"
 
-# Used for platform dispatching
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_FAMILY_windows
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_$(Platform_arch)
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_MODEL_$(Platform_arch_model)
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_windows_$(Platform_arch)
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model)
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_COMPILER_visCPP
-
 !if "$(BUILDARCH)" == "ia64"
 STACK_SIZE="/STACK:1048576,262144"
 !else
@@ -104,6 +93,8 @@
 !endif
 !endif
 
+# If you modify the exports below, please make the corresponding changes in
+# src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
 LINK_FLAGS=$(LINK_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
   /export:JNI_GetDefaultJavaVMInitArgs       \
   /export:JNI_CreateJavaVM                   \
--- a/make/windows/projectfiles/common/Makefile	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/common/Makefile	Fri Jan 21 13:03:13 2011 -0800
@@ -22,7 +22,10 @@
 #  
 #
 
-!include local.make
+!ifdef LOCAL_MAKE
+!include $(LOCAL_MAKE)
+!endif
+
 
 WorkSpace=$(HOTSPOTWORKSPACE)
 
@@ -34,11 +37,18 @@
 !else
 !ifdef JAVA_HOME
 BootStrapDir=$(JAVA_HOME)
+!else
+!ifdef HOTSPOTJDKDIST
+BootStrapDir=$(HOTSPOTJDKDIST)
+!endif
 !endif
 !endif
 !endif
 
+
+
 !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/projectcreator.make
+!include $(WorkSpace)/make/windows/makefiles/compile.make
 
 # Pick up rules for building JVMTI (JSR-163)
 JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles
@@ -56,6 +66,9 @@
 !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make
 !endif
 
+HS_INTERNAL_NAME=jvm
+!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/launcher.make
+
 default:: $(AdditionalTargets) $(JvmtiGeneratedFiles)
 
 !include $(HOTSPOTWORKSPACE)/make/hotspot_version
@@ -97,7 +110,7 @@
       -define              JRE_RELEASE_VERSION=\\\"$(JRE_RELEASE_VERSION)\\\" \
       -define              HOTSPOT_VM_DISTRO=\\\"$(HOTSPOT_VM_DISTRO)\\\"
 
-$(HOTSPOTBUILDSPACE)/$(ProjectFile): local.make $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
+$(HOTSPOTBUILDROOT)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
 	@$(RUN_JAVA) -Djava.class.path=$(HOTSPOTBUILDSPACE)/classes ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
 
 clean:
--- a/make/windows/projectfiles/compiler1/Makefile	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/compiler1/Makefile	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,6 @@
 #  
 #
 
-Variant=compiler1
-!include local.make
+!include ../local.make
 
 !include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile
--- a/make/windows/projectfiles/compiler1/vm.def	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/compiler1/vm.def	Fri Jan 21 13:03:13 2011 -0800
@@ -2,6 +2,6 @@
 ; This .DEF file is a placeholder for one which is automatically
 ; generated during the build process. See
 ; make\windows\build_vm_def.sh and
-; make\windows\makefiles\makedeps.make (esp. the "-prelink"
+; make\windows\makefiles\projectcreator.make (esp. the "-prelink"
 ; options).
 ;
--- a/make/windows/projectfiles/compiler2/Makefile	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/compiler2/Makefile	Fri Jan 21 13:03:13 2011 -0800
@@ -22,8 +22,7 @@
 #  
 #
 
-Variant=compiler2
-!include local.make
+!include ../local.make
 AdlcOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\adfiles
 AdditionalTargets=$(AdlcOutDir)\ad_$(Platform_arch_model).cpp $(AdlcOutDir)\dfa_$(Platform_arch_model).cpp
 
--- a/make/windows/projectfiles/compiler2/vm.def	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/compiler2/vm.def	Fri Jan 21 13:03:13 2011 -0800
@@ -2,6 +2,6 @@
 ; This .DEF file is a placeholder for one which is automatically
 ; generated during the build process. See
 ; make\windows\build_vm_def.sh and
-; make\windows\makefiles\makedeps.make (esp. the "-prelink"
+; make\windows\makefiles\projectcreator.make (esp. the "-prelink"
 ; options).
 ;
--- a/make/windows/projectfiles/core/Makefile	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/core/Makefile	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,6 @@
 #  
 #
 
-Variant=core
-!include local.make
+!include ../local.make
 
 !include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile
--- a/make/windows/projectfiles/core/vm.def	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/core/vm.def	Fri Jan 21 13:03:13 2011 -0800
@@ -2,6 +2,6 @@
 ; This .DEF file is a placeholder for one which is automatically
 ; generated during the build process. See
 ; make\windows\build_vm_def.sh and
-; make\windows\makefiles\makedeps.make (esp. the "-prelink"
+; make\windows\makefiles\projectcreator.make (esp. the "-prelink"
 ; options).
 ;
--- a/make/windows/projectfiles/kernel/Makefile	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/kernel/Makefile	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #   
 # This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,6 @@
 #  
 #
 
-Variant=kernel
-!include local.make
+!include ../local.make
 
 !include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile
--- a/make/windows/projectfiles/kernel/vm.def	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/kernel/vm.def	Fri Jan 21 13:03:13 2011 -0800
@@ -2,6 +2,6 @@
 ; This .DEF file is a placeholder for one which is automatically
 ; generated during the build process. See
 ; make\windows\build_vm_def.sh and
-; make\windows\makefiles\makedeps.make (esp. the "-prelink"
+; make\windows\makefiles\projectcreator.make (esp. the "-prelink"
 ; options).
 ;
--- a/make/windows/projectfiles/tiered/Makefile	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/tiered/Makefile	Fri Jan 21 13:03:13 2011 -0800
@@ -22,8 +22,7 @@
 #  
 #
 
-Variant=tiered
-!include local.make
+!include ../local.make
 AdlcOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\adfiles
 AdditionalTargets=$(AdlcOutDir)\ad_$(Platform_arch_model).cpp $(AdlcOutDir)\dfa_$(Platform_arch_model).cpp
 
--- a/make/windows/projectfiles/tiered/vm.def	Fri Jan 21 13:01:02 2011 -0800
+++ b/make/windows/projectfiles/tiered/vm.def	Fri Jan 21 13:03:13 2011 -0800
@@ -2,6 +2,6 @@
 ; This .DEF file is a placeholder for one which is automatically
 ; generated during the build process. See
 ; make\windows\build_vm_def.sh and
-; make\windows\makefiles\makedeps.make (esp. the "-prelink"
+; make\windows\makefiles\projectcreator.make (esp. the "-prelink"
 ; options).
 ;
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4102,11 +4102,15 @@
   store_klass(t2, top);
   verify_oop(top);
 
+  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
+  sub(top, t1, t1); // size of tlab's allocated portion
+  incr_allocated_bytes(t1, t2, t3);
+
   // refill the tlab with an eden allocation
   bind(do_refill);
   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
   sll_ptr(t1, LogHeapWordSize, t1);
-  // add object_size ??
+  // allocate new tlab, address returned in top
   eden_allocate(top, t1, 0, t2, t3, slow_case);
 
   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
@@ -4134,6 +4138,17 @@
   delayed()->nop();
 }
 
+void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+                                          Register t1, Register t2) {
+  // Bump total bytes allocated by this thread
+  assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
+  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
+  // v8 support has gone the way of the dodo
+  ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
+  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
+  stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
+}
+
 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
   switch (cond) {
     // Note some conditions are synonyms for others
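
The new incr_allocated_bytes maintains a per-thread, full 64-bit running total of allocated bytes (hence the requirement that t1 be a global register, so all 64 bits survive a context switch), and tlab_refill now credits the retiring TLAB's used portion, top - start, before a replacement TLAB is carved out of eden. The accounting the assembly implements, sketched in C++ with illustrative field names (not HotSpot's actual JavaThread layout):

    #include <cstdint>
    #include <cstdio>

    struct ThreadAcct {
        uintptr_t tlab_start;       // base of the current TLAB
        uintptr_t tlab_top;         // bump pointer within it
        uint64_t  allocated_bytes;  // lifetime total; needs all 64 bits
    };

    // On TLAB refill: credit the portion of the old TLAB actually handed out.
    void retire_tlab(ThreadAcct& t) {
        t.allocated_bytes += t.tlab_top - t.tlab_start;
    }

    int main() {
        ThreadAcct t{0x1000, 0x1400, 0};
        retire_tlab(t);
        std::printf("allocated: %llu bytes\n",
                    (unsigned long long)t.allocated_bytes);  // 1024
        return 0;
    }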
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -823,15 +823,23 @@
   };
 
   // test if x is within signed immediate range for nbits
-  static bool is_simm(int x, int nbits) { return -( 1 << nbits-1 )  <= x   &&   x  <  ( 1 << nbits-1 ); }
+  static bool is_simm(intptr_t x, int nbits) { return -( intptr_t(1) << nbits-1 )  <= x   &&   x  <  ( intptr_t(1) << nbits-1 ); }
 
   // test if -4096 <= x <= 4095
-  static bool is_simm13(int x) { return is_simm(x, 13); }
+  static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
+
+  static bool is_in_wdisp_range(address a, address b, int nbits) {
+    intptr_t d = intptr_t(b) - intptr_t(a);
+    return is_simm(d, nbits + 2);
+  }
 
   // test if label is in simm16 range in words (wdisp16).
   bool is_in_wdisp16_range(Label& L) {
-    intptr_t d = intptr_t(pc()) - intptr_t(target(L));
-    return is_simm(d, 18);
+    return is_in_wdisp_range(target(L), pc(), 16);
+  }
+  // test if the distance between two addresses fits in simm30 range in words
+  static bool is_in_wdisp30_range(address a, address b) {
+    return is_in_wdisp_range(a, b, 30);
   }
 
   enum ASIs { // page 72, v9
@@ -1843,6 +1851,8 @@
   inline void jmp( Register s1, Register s2 );
   inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
 
+  // Check if the call target is out of wdisp30 range (relative to the code cache)
+  static inline bool is_far_target(address d);
   inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
   inline void call( Label& L,   relocInfo::relocType rt = relocInfo::runtime_call_type );
   inline void callr( Register s1, Register s2 );
@@ -2389,6 +2399,8 @@
     Label&   slow_case                 // continuation point if fast allocation fails
   );
   void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
+  void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+                            Register t1, Register t2);
 
   // interface method calling
   void lookup_interface_method(Register recv_klass,
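
Widening is_simm from int to intptr_t matters because the new wdisp helpers feed it raw pointer differences, which need not fit in 32 bits on 64-bit SPARC; the old signature would have truncated them before the range test ever ran. A self-contained sketch of the widened check and the word-displacement helper, mirroring the header's logic (assumes an LP64 build for the pointer arithmetic; plain functions, not the real API):

    #include <cstdint>
    #include <cstdio>

    static bool is_simm(intptr_t x, int nbits) {
        return -((intptr_t)1 << (nbits - 1)) <= x &&
               x < ((intptr_t)1 << (nbits - 1));
    }

    // Does the byte distance between a and b fit an nbits *word*
    // displacement? Words are 4 bytes, hence nbits + 2 on the byte test.
    static bool is_in_wdisp_range(const void* a, const void* b, int nbits) {
        intptr_t d = (intptr_t)b - (intptr_t)a;
        return is_simm(d, nbits + 2);
    }

    int main() {
        std::printf("%d\n", is_simm(4095, 13));    // 1: top of simm13
        std::printf("%d\n", is_simm(-4097, 13));   // 0: just below range
        char buf[64];
        std::printf("%d\n", is_in_wdisp_range(buf, buf + 32, 16));  // 1
        return 0;
    }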
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -588,10 +588,13 @@
 inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
 inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
 
+inline bool MacroAssembler::is_far_target(address d) {
+  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
+}
+
 // Call with a check to see if we need to deal with the added
 // expense of relocation and if we overflow the displacement
-// of the quick call instruction./
-// Check to see if we have to deal with relocations
+// of the quick call instruction.
 inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
 #ifdef _LP64
   intptr_t disp;
@@ -603,14 +606,12 @@
 
   // Is this address within range of the call instruction?
   // If not, use the expensive instruction sequence
-  disp = (intptr_t)d - (intptr_t)pc();
-  if ( disp != (intptr_t)(int32_t)disp ) {
+  if (is_far_target(d)) {
     relocate(rt);
     AddressLiteral dest(d);
     jumpl_to(dest, O7, O7);
-  }
-  else {
-    Assembler::call( d, rt );
+  } else {
+    Assembler::call(d, rt);
   }
 #else
   Assembler::call( d, rt );
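
is_far_target replaces the old "distance from the current pc" test with "reachable from both ends of the code cache": a target only gets the short call sequence if a 30-bit word displacement could reach it from anywhere in the cache, so the decision no longer depends on where the caller happens to sit. A hedged sketch of that predicate (the bounds are stand-ins, not the real CodeCache interface; int64_t stands in for addresses so the example also runs on 32-bit hosts):

    #include <cstdint>
    #include <cstdio>

    static bool is_simm(int64_t x, int nbits) {
        return -((int64_t)1 << (nbits - 1)) <= x &&
               x < ((int64_t)1 << (nbits - 1));
    }
    static bool in_wdisp30(int64_t from, int64_t to) {
        return is_simm(to - from, 30 + 2);  // 30-bit word disp = 32-bit byte disp
    }
    static bool is_far_target(int64_t target, int64_t cache_lo, int64_t cache_hi) {
        return !in_wdisp30(target, cache_lo) || !in_wdisp30(target, cache_hi);
    }

    int main() {
        int64_t lo = 0x10000000, hi = 0x20000000;
        std::printf("%d\n", is_far_target(0x18000000, lo, hi));       // 0: near
        std::printf("%d\n", is_far_target(0x7f0000000000LL, lo, hi)); // 1: far
        return 0;
    }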
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2358,6 +2358,8 @@
          op->tmp3()->as_register()  == G4 &&
          op->tmp4()->as_register()  == O1 &&
          op->klass()->as_register() == G5, "must be");
+
+  LP64_ONLY( __ signx(op->len()->as_register()); )
   if (UseSlowPath ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -166,14 +166,17 @@
   Register obj,                        // result: pointer to object after successful allocation
   Register var_size_in_bytes,          // object size in bytes if unknown at compile time; invalid otherwise
   int      con_size_in_bytes,          // object size in bytes if   known at compile time
-  Register t1,                         // temp register
+  Register t1,                         // temp register, must be a global register for incr_allocated_bytes
   Register t2,                         // temp register
   Label&   slow_case                   // continuation point if fast allocation fails
 ) {
+  RegisterOrConstant size_in_bytes = var_size_in_bytes->is_valid()
+    ? RegisterOrConstant(var_size_in_bytes) : RegisterOrConstant(con_size_in_bytes);
   if (UseTLAB) {
     tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
   } else {
     eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
+    incr_allocated_bytes(size_in_bytes, t1, t2);
   }
 }
 
@@ -214,7 +217,7 @@
 void C1_MacroAssembler::allocate_object(
   Register obj,                        // result: pointer to object after successful allocation
   Register t1,                         // temp register
-  Register t2,                         // temp register
+  Register t2,                         // temp register, must be a global register for try_allocate
   Register t3,                         // temp register
   int      hdr_size,                   // object header size in words
   int      obj_size,                   // object size in words
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -448,7 +448,9 @@
 
           // get the instance size
           __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
+
           __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
+
           __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
           __ verify_oop(O0_obj);
           __ mov(O0, I0);
@@ -459,6 +461,8 @@
           // get the instance size
           __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
           __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
+          __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
+
           __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
           __ verify_oop(O0_obj);
           __ mov(O0, I0);
@@ -573,6 +577,7 @@
           __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
 
           __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size
+          __ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);
 
           __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
           __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1295,16 +1295,13 @@
 // Get the method data pointer from the methodOop and set the
 // specified register to its value.
 
-void InterpreterMacroAssembler::set_method_data_pointer_offset(Register Roff) {
+void InterpreterMacroAssembler::set_method_data_pointer() {
   assert(ProfileInterpreter, "must be profiling interpreter");
   Label get_continue;
 
   ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
   test_method_data_pointer(get_continue);
   add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
-  if (Roff != noreg)
-    // Roff contains a method data index ("mdi").  It defaults to zero.
-    add(ImethodDataPtr, Roff, ImethodDataPtr);
   bind(get_continue);
 }
 
@@ -1315,10 +1312,11 @@
   Label zero_continue;
 
   // Test MDO to avoid the call if it is NULL.
-  ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr);
+  ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
   test_method_data_pointer(zero_continue);
   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
-  set_method_data_pointer_offset(O0);
+  add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
+  add(ImethodDataPtr, O0, ImethodDataPtr);
   bind(zero_continue);
 }
 
@@ -1369,7 +1367,6 @@
 }
 
 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
-                                                                Register cur_bcp,
                                                                 Register Rtmp,
                                                                 Label &profile_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1400,8 +1397,8 @@
   delayed()->nop();
 
   // Build it now.
-  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), cur_bcp);
-  set_method_data_pointer_offset(O0);
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+  set_method_data_pointer_for_bcp();
   ba(false, profile_continue);
   delayed()->nop();
   bind(done);
--- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -269,12 +269,11 @@
 
 #ifndef CC_INTERP
   // Interpreter profiling operations
-  void set_method_data_pointer() { set_method_data_pointer_offset(noreg); }
+  void set_method_data_pointer();
   void set_method_data_pointer_for_bcp();
-  void set_method_data_pointer_offset(Register mdi_reg);
   void test_method_data_pointer(Label& zero_continue);
   void verify_method_data_pointer();
-  void test_invocation_counter_for_mdp(Register invocation_count, Register cur_bcp, Register Rtmp, Label &profile_continue);
+  void test_invocation_counter_for_mdp(Register invocation_count, Register Rtmp, Label &profile_continue);
 
   void set_mdp_data_at(int constant, Register value);
   void increment_mdp_data_at(Address counter, Register bumped_count,
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -395,7 +395,7 @@
 //
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
-void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek, TRAPS) {
+void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
   // Here is the register state during an interpreted call,
   // as set up by generate_method_handle_interpreter_entry():
   // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
@@ -447,8 +447,9 @@
       // exception.  Since we use a C2I adapter to set up the
       // interpreter state, arguments are expected in compiler
       // argument registers.
-      methodHandle mh(raise_exception_method());
-      address c2i_entry = methodOopDesc::make_adapters(mh, CATCH);
+      assert(raise_exception_method(), "must be set");
+      address c2i_entry = raise_exception_method()->get_c2i_entry();
+      assert(c2i_entry, "method must be linked");
 
       __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
 
--- a/src/cpu/sparc/vm/sparc.ad	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/sparc.ad	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -575,7 +575,11 @@
 
 int MachCallRuntimeNode::ret_addr_offset() {
 #ifdef _LP64
-  return NativeFarCall::instruction_size;  // farcall; delay slot
+  if (MacroAssembler::is_far_target(entry_point())) {
+    return NativeFarCall::instruction_size;
+  } else {
+    return NativeCall::instruction_size;
+  }
 #else
   return NativeCall::instruction_size;  // call; delay slot
 #endif
@@ -941,7 +945,7 @@
 #endif
 }
 
-void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false, bool force_far_call = false) {
+void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
   // The method which records debug information at every safepoint
   // expects the call to be the first instruction in the snippet as
   // it creates a PcDesc structure which tracks the offset of a call
@@ -963,20 +967,7 @@
   int startpos = __ offset();
 #endif /* ASSERT */
 
-#ifdef _LP64
-  // Calls to the runtime or native may not be reachable from compiled code,
-  // so we generate the far call sequence on 64 bit sparc.
-  // This code sequence is relocatable to any address, even on LP64.
-  if ( force_far_call ) {
-    __ relocate(rtype);
-    AddressLiteral dest(entry_point);
-    __ jumpl_to(dest, O7, O7);
-  }
-  else
-#endif
-  {
-     __ call((address)entry_point, rtype);
-  }
+  __ call((address)entry_point, rtype);
 
   if (preserve_g2)   __ delayed()->mov(G2, L7);
   else __ delayed()->nop();
@@ -2507,7 +2498,7 @@
     // CALL directly to the runtime
     // The user of this is responsible for ensuring that R_L7 is empty (killed).
     emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
-                    /*preserve_g2=*/true, /*force far call*/true);
+                    /*preserve_g2=*/true);
   %}
 
   enc_class preserve_SP %{
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1364,15 +1364,8 @@
       // We have decided to profile this method in the interpreter
       __ bind(profile_method);
 
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), Lbcp, true);
-
-#ifdef ASSERT
-      __ tst(O0);
-      __ breakpoint_trap(Assembler::notEqual);
-#endif
-
-      __ set_method_data_pointer();
-
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
       __ ba(false, profile_method_continue);
       __ delayed()->nop();
     }
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1689,7 +1689,7 @@
       const Register G4_invoke_ctr = G4;
       __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
       if (ProfileInterpreter) {
-        __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
+        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
         if (UseOnStackReplacement) {
           __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
         }
@@ -3393,21 +3393,21 @@
     __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
 
     if (allow_shared_alloc) {
-    // Check if tlab should be discarded (refill_waste_limit >= free)
-    __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
-    __ sub(RendValue, RoldTopValue, RfreeValue);
+      // Check if tlab should be discarded (refill_waste_limit >= free)
+      __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
+      __ sub(RendValue, RoldTopValue, RfreeValue);
 #ifdef _LP64
-    __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
+      __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
 #else
-    __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
+      __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
 #endif
-    __ cmp(RtlabWasteLimitValue, RfreeValue);
-    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, slow_case); // tlab waste is small
-    __ delayed()->nop();
-
-    // increment waste limit to prevent getting stuck on this slow path
-    __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
-    __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
+      __ cmp(RtlabWasteLimitValue, RfreeValue);
+      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, slow_case); // tlab waste is small
+      __ delayed()->nop();
+
+      // increment waste limit to prevent getting stuck on this slow path
+      __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
+      __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
     } else {
       // No allocation in the shared eden.
       __ br(Assembler::always, false, Assembler::pt, slow_case);
@@ -3445,6 +3445,10 @@
     __ cmp(RoldTopValue, RnewTopValue);
     __ brx(Assembler::notEqual, false, Assembler::pn, retry);
     __ delayed()->nop();
+
+    // bump total bytes allocated by this thread
+    // RoldTopValue and RtopAddr are dead, so can use G1 and G3
+    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
   }
 
   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
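
Once the cas succeeds, RoldTopValue and RtopAddr are dead, which frees G1 and G3 as scratch registers for the new allocated-bytes bump. The overall shape of this shared-eden fast path in plain C++, heavily simplified (no end-of-eden check, no GC interaction):

    // Bump-pointer allocation from a shared eden with a CAS retry loop,
    // then per-thread byte accounting.
    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static std::atomic<uintptr_t> eden_top{0x1000};
    static thread_local uint64_t allocated_bytes = 0;

    void* eden_allocate(size_t size) {
        uintptr_t old_top = eden_top.load();
        // On failure compare_exchange_weak reloads old_top; retry, like
        // the brx(notEqual, ...) loop in the assembly.
        while (!eden_top.compare_exchange_weak(old_top, old_top + size)) {
        }
        allocated_bytes += size;  // the incr_allocated_bytes step
        return (void*)old_top;
    }

    int main() {
        void* p = eden_allocate(64);
        std::printf("object at %p, thread total %llu bytes\n",
                    p, (unsigned long long)allocated_bytes);
        return 0;
    }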
--- a/src/cpu/x86/vm/assembler_x86.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -820,7 +820,20 @@
 }
 
 
-// Now the Assembler instruction (identical for 32/64 bits)
+// Now the Assembler instructions (identical for 32/64 bits)
+
+void Assembler::adcl(Address dst, int32_t imm32) {
+  InstructionMark im(this);
+  prefix(dst);
+  emit_arith_operand(0x81, rdx, dst, imm32);
+}
+
+void Assembler::adcl(Address dst, Register src) {
+  InstructionMark im(this);
+  prefix(dst, src);
+  emit_byte(0x11);
+  emit_operand(src, dst);
+}
 
 void Assembler::adcl(Register dst, int32_t imm32) {
   prefix(dst);
@@ -2195,9 +2208,7 @@
 void Assembler::orl(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefix(dst);
-  emit_byte(0x81);
-  emit_operand(rcx, dst, 4);
-  emit_long(imm32);
+  emit_arith_operand(0x81, rcx, dst, imm32);
 }
 
 void Assembler::orl(Register dst, int32_t imm32) {
@@ -2205,7 +2216,6 @@
   emit_arith(0x81, 0xC8, dst, imm32);
 }
 
-
 void Assembler::orl(Register dst, Address src) {
   InstructionMark im(this);
   prefix(src, dst);
@@ -2213,7 +2223,6 @@
   emit_operand(dst, src);
 }
 
-
 void Assembler::orl(Register dst, Register src) {
   (void) prefix_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x0B, 0xC0, dst, src);
@@ -2692,20 +2701,7 @@
 void Assembler::subl(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefix(dst);
-  if (is8bit(imm32)) {
-    emit_byte(0x83);
-    emit_operand(rbp, dst, 1);
-    emit_byte(imm32 & 0xFF);
-  } else {
-    emit_byte(0x81);
-    emit_operand(rbp, dst, 4);
-    emit_long(imm32);
-  }
-}
-
-void Assembler::subl(Register dst, int32_t imm32) {
-  prefix(dst);
-  emit_arith(0x81, 0xE8, dst, imm32);
+  emit_arith_operand(0x81, rbp, dst, imm32);
 }
 
 void Assembler::subl(Address dst, Register src) {
@@ -2715,6 +2711,11 @@
   emit_operand(src, dst);
 }
 
+void Assembler::subl(Register dst, int32_t imm32) {
+  prefix(dst);
+  emit_arith(0x81, 0xE8, dst, imm32);
+}
+
 void Assembler::subl(Register dst, Address src) {
   InstructionMark im(this);
   prefix(src, dst);
@@ -4333,6 +4334,7 @@
   emit_byte(0xD3);
   emit_byte(0xF8 | encode);
 }
+
 void Assembler::sbbq(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefixq(dst);
@@ -4392,20 +4394,7 @@
 void Assembler::subq(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefixq(dst);
-  if (is8bit(imm32)) {
-    emit_byte(0x83);
-    emit_operand(rbp, dst, 1);
-    emit_byte(imm32 & 0xFF);
-  } else {
-    emit_byte(0x81);
-    emit_operand(rbp, dst, 4);
-    emit_long(imm32);
-  }
-}
-
-void Assembler::subq(Register dst, int32_t imm32) {
-  (void) prefixq_and_encode(dst->encoding());
-  emit_arith(0x81, 0xE8, dst, imm32);
+  emit_arith_operand(0x81, rbp, dst, imm32);
 }
 
 void Assembler::subq(Address dst, Register src) {
@@ -4415,6 +4404,11 @@
   emit_operand(src, dst);
 }
 
+void Assembler::subq(Register dst, int32_t imm32) {
+  (void) prefixq_and_encode(dst->encoding());
+  emit_arith(0x81, 0xE8, dst, imm32);
+}
+
 void Assembler::subq(Register dst, Address src) {
   InstructionMark im(this);
   prefixq(src, dst);
@@ -7136,9 +7130,9 @@
 }
 
 // Preserves rbx, and rdx.
-void MacroAssembler::tlab_refill(Label& retry,
-                                 Label& try_eden,
-                                 Label& slow_case) {
+Register MacroAssembler::tlab_refill(Label& retry,
+                                     Label& try_eden,
+                                     Label& slow_case) {
   Register top = rax;
   Register t1  = rcx;
   Register t2  = rsi;
@@ -7185,7 +7179,7 @@
 
   // if tlab is currently allocated (top or end != null) then
   // fill [top, end + alignment_reserve) with array object
-  testptr (top, top);
+  testptr(top, top);
   jcc(Assembler::zero, do_refill);
 
   // set up the mark word
@@ -7197,16 +7191,20 @@
   movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
   // set klass to intArrayKlass
   // dubious reloc; why not an oop reloc?
-  movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
+  movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
   // store klass last.  concurrent gcs assumes klass length is valid if
   // klass field is not null.
   store_klass(top, t1);
 
+  movptr(t1, top);
+  subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
+  incr_allocated_bytes(thread_reg, t1, 0);
+
   // refill the tlab with an eden allocation
   bind(do_refill);
   movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
   shlptr(t1, LogHeapWordSize);
-  // add object_size ??
+  // allocate new tlab, address returned in top
   eden_allocate(top, t1, 0, t2, slow_case);
 
   // Check that t1 was preserved in eden_allocate.
@@ -7234,6 +7232,34 @@
   movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
   verify_tlab();
   jmp(retry);
+
+  return thread_reg; // for use by caller
+}
+
+void MacroAssembler::incr_allocated_bytes(Register thread,
+                                          Register var_size_in_bytes,
+                                          int con_size_in_bytes,
+                                          Register t1) {
+#ifdef _LP64
+  if (var_size_in_bytes->is_valid()) {
+    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
+  } else {
+    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
+  }
+#else
+  if (!thread->is_valid()) {
+    assert(t1->is_valid(), "need temp reg");
+    thread = t1;
+    get_thread(thread);
+  }
+
+  if (var_size_in_bytes->is_valid()) {
+    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
+  } else {
+    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
+  }
+  adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
+#endif
 }
 
 static const double     pi_4 =  0.7853981633974483;
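
On 64-bit the counter update is a single addq; on 32-bit the full 64-bit total is kept correct by an addl into the low word followed by an adcl of 0 into the high word, which is what the new adcl(Address, ...) forms added above exist for. The two-halves arithmetic, sketched in C++:

    // Emulates the 32-bit addl/adcl pair: add into the low half, then
    // propagate the carry into the high half, updating a 64-bit counter
    // without 64-bit instructions.
    #include <cstdint>
    #include <cstdio>

    struct Counter64 { uint32_t lo, hi; };

    void incr(Counter64& c, uint32_t size) {
        uint32_t old_lo = c.lo;
        c.lo += size;                // addl
        if (c.lo < old_lo) c.hi++;   // adcl $0: carry out of the low word
    }

    int main() {
        Counter64 c{0xFFFFFFF0u, 0};
        incr(c, 0x20);
        std::printf("hi=%u lo=0x%x\n", c.hi, c.lo);  // hi=1 lo=0x10
        return 0;
    }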
--- a/src/cpu/x86/vm/assembler_x86.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -674,12 +674,14 @@
   // Utilities
 
 #ifdef _LP64
- static bool is_simm(int64_t x, int nbits) { return -( CONST64(1) << (nbits-1) )  <= x   &&   x  <  ( CONST64(1) << (nbits-1) ); }
+ static bool is_simm(int64_t x, int nbits) { return -(CONST64(1) << (nbits-1)) <= x &&
+                                                    x < (CONST64(1) << (nbits-1)); }
  static bool is_simm32(int64_t x) { return x == (int64_t)(int32_t)x; }
 #else
- static bool is_simm(int32_t x, int nbits) { return -( 1 << (nbits-1) )  <= x   &&   x  <  ( 1 << (nbits-1) ); }
+ static bool is_simm(int32_t x, int nbits) { return -(1 << (nbits-1)) <= x &&
+                                                    x < (1 << (nbits-1)); }
  static bool is_simm32(int32_t x) { return true; }
-#endif // LP64
+#endif // _LP64
 
   // Generic instructions
   // Does 32bit or 64bit as needed for the platform. In some sense these
@@ -705,7 +707,6 @@
   void push(void* v);
   void pop(void* v);
 
-
   // These do register sized moves/scans
   void rep_mov();
   void rep_set();
@@ -716,6 +717,8 @@
 
   // Vanilla instructions in lexical order
 
+  void adcl(Address dst, int32_t imm32);
+  void adcl(Address dst, Register src);
   void adcl(Register dst, int32_t imm32);
   void adcl(Register dst, Address src);
   void adcl(Register dst, Register src);
@@ -724,7 +727,6 @@
   void adcq(Register dst, Address src);
   void adcq(Register dst, Register src);
 
-
   void addl(Address dst, int32_t imm32);
   void addl(Address dst, Register src);
   void addl(Register dst, int32_t imm32);
@@ -737,7 +739,6 @@
   void addq(Register dst, Address src);
   void addq(Register dst, Register src);
 
-
   void addr_nop_4();
   void addr_nop_5();
   void addr_nop_7();
@@ -759,7 +760,6 @@
   void andq(Register dst, Address src);
   void andq(Register dst, Register src);
 
-
   // Bitwise Logical AND of Packed Double-Precision Floating-Point Values
   void andpd(XMMRegister dst, Address src);
   void andpd(XMMRegister dst, XMMRegister src);
@@ -1151,7 +1151,7 @@
 #ifdef _LP64
   void movq(Register dst, Register src);
   void movq(Register dst, Address src);
-  void movq(Address dst, Register src);
+  void movq(Address  dst, Register src);
 #endif
 
   void movq(Address     dst, MMXRegister src );
@@ -1177,7 +1177,7 @@
   void movsbq(Register dst, Register src);
 
   // Move signed 32bit immediate to 64bit extending sign
-  void movslq(Address dst, int32_t imm64);
+  void movslq(Address  dst, int32_t imm64);
   void movslq(Register dst, int32_t imm64);
 
   void movslq(Register dst, Address src);
@@ -1857,7 +1857,10 @@
     Register t2,                       // temp register
     Label&   slow_case                 // continuation point if fast allocation fails
   );
-  void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
+  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
+  void incr_allocated_bytes(Register thread,
+                            Register var_size_in_bytes, int con_size_in_bytes,
+                            Register t1 = noreg);
 
   // interface method calling
   void lookup_interface_method(Register recv_klass,
@@ -2180,9 +2183,9 @@
   void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
   void divss(XMMRegister dst, AddressLiteral src) { Assembler::divss(dst, as_Address(src)); }
 
-  void movsd(XMMRegister dst, XMMRegister src)    { Assembler::movsd(dst, src); }
-  void movsd(Address dst, XMMRegister src)        { Assembler::movsd(dst, src); }
-  void movsd(XMMRegister dst, Address src)        { Assembler::movsd(dst, src); }
+  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
+  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
+  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
   void movsd(XMMRegister dst, AddressLiteral src) { Assembler::movsd(dst, as_Address(src)); }
 
   void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1641,12 +1641,14 @@
 }
 
 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
+  Register len =  op->len()->as_register();
+  LP64_ONLY( __ movslq(len, len); )
+
   if (UseSlowPath ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
     __ jmp(*op->stub()->entry());
   } else {
-    Register len =  op->len()->as_register();
     Register tmp1 = op->tmp1()->as_register();
     Register tmp2 = op->tmp2()->as_register();
     Register tmp3 = op->tmp3()->as_register();
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -141,6 +141,7 @@
     tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
   } else {
     eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
+    incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1);
   }
 }
 
@@ -234,7 +235,7 @@
 
 void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
   assert(obj == rax, "obj must be in rax, for cmpxchg");
-  assert(obj != t1 && obj != t2 && t1 != t2, "registers must be different"); // XXX really?
+  assert_different_registers(obj, t1, t2); // XXX really?
   assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
 
   try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);
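
The eden fast path above now pairs eden_allocate with incr_allocated_bytes so the per-thread allocated-bytes counter stays current when the TLAB is bypassed. The helper takes either a register holding a runtime size or a compile-time constant (noreg/0 marking the unused one). A minimal stand-alone sketch of the bookkeeping it is expected to emit, with placeholder names (ThreadModel and its field are illustrative, not HotSpot types):

    #include <cstddef>

    // Hypothetical model of the per-thread counter; the assembler helper
    // emits the equivalent add against the JavaThread's counter field.
    struct ThreadModel {
      size_t allocated_bytes;
    };

    // Mirrors the (var_size_in_bytes, con_size_in_bytes) parameter pair:
    // exactly one of the two is meaningful at any call site.
    inline void incr_allocated_bytes(ThreadModel* t,
                                     size_t var_size_in_bytes,
                                     size_t con_size_in_bytes) {
      t->allocated_bytes += (var_size_in_bytes != 0) ? var_size_in_bytes
                                                     : con_size_in_bytes;
    }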
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -977,7 +977,6 @@
         // verify that there is really a valid exception in rax,
         __ verify_not_null_oop(exception_oop);
 
-
         oop_maps = new OopMapSet();
         OopMap* oop_map = generate_oop_map(sasm, 1);
         generate_handle_exception(sasm, oop_maps, oop_map);
@@ -1037,13 +1036,16 @@
           // if we got here then the TLAB allocation failed, so try
           // refilling the TLAB or allocating directly from eden.
           Label retry_tlab, try_eden;
-          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass)
+          const Register thread =
+            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi
 
           __ bind(retry_tlab);
 
           // get the instance size (size is positive so movl is fine for 64bit)
           __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
+
           __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
+
           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
           __ verify_oop(obj);
           __ pop(rbx);
@@ -1053,7 +1055,10 @@
           __ bind(try_eden);
           // get the instance size (size is positive so movl is fine for 64bit)
           __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
+
           __ eden_allocate(obj, obj_size, 0, t1, slow_path);
+          __ incr_allocated_bytes(thread, obj_size, 0);
+
           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
           __ verify_oop(obj);
           __ pop(rbx);
@@ -1143,12 +1148,13 @@
           // if we got here then the TLAB allocation failed, so try
           // refilling the TLAB or allocating directly from eden.
           Label retry_tlab, try_eden;
-          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx, & rdx
+          const Register thread =
+            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
 
           __ bind(retry_tlab);
 
           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
-          // since size is postive movl does right thing on 64bit
+          // since size is positive movl does right thing on 64bit
           __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
           // since size is positive movl does right thing on 64bit
           __ movl(arr_size, length);
@@ -1175,7 +1181,7 @@
 
           __ bind(try_eden);
           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
-          // since size is postive movl does right thing on 64bit
+          // since size is positive movl does right thing on 64bit
           __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
           // since size is positive movl does right thing on 64bit
           __ movl(arr_size, length);
@@ -1188,6 +1194,7 @@
           __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
 
           __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
+          __ incr_allocated_bytes(thread, arr_size, 0);
 
           __ initialize_header(obj, klass, length, t1, t2);
           __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
--- a/src/cpu/x86/vm/globals_x86.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/globals_x86.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -62,7 +62,7 @@
 // due to lack of optimization caused by C++ compiler bugs
 define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
 #else
-define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
+define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+5));
 #endif // AMD64
 
 define_pd_global(intx, PreInflateSpin,           10);
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -819,7 +819,7 @@
 // Set the method data pointer for the current bcp.
 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
   assert(ProfileInterpreter, "must be profiling interpreter");
-  Label zero_continue;
+  Label set_mdp;
   push(rax);
   push(rbx);
 
@@ -827,21 +827,17 @@
   // Test MDO to avoid the call if it is NULL.
   movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
   testptr(rax, rax);
-  jcc(Assembler::zero, zero_continue);
-
+  jcc(Assembler::zero, set_mdp);
   // rbx,: method
   // rsi: bcp
   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi);
   // rax,: mdi
-
+  // mdo is guaranteed to be non-zero here; we checked for it before the call.
   movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-  testptr(rbx, rbx);
-  jcc(Assembler::zero, zero_continue);
   addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
-  addptr(rbx, rax);
-  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
-
-  bind(zero_continue);
+  addptr(rax, rbx);
+  bind(set_mdp);
+  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
   pop(rbx);
   pop(rax);
 }
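
The rewritten set_method_data_pointer_for_bcp folds both exits into a single store: when the methodOop has no MDO, rax is already zero and a NULL mdx is stored; otherwise the MDO cannot have vanished across the VM call, so the old re-test after the call was dead code. The value stored into the frame's mdx slot amounts to this (a sketch of the address arithmetic only, not the emitted assembly; rax holds mdi after bcp_to_di, rbx the methodDataOop):

    #include <cstdint>

    intptr_t compute_mdx(intptr_t mdo, intptr_t data_offset, intptr_t mdi) {
      if (mdo == 0) return 0;            // no MDO: store a NULL mdp
      return mdo + data_offset + mdi;    // into the MDO's data section
    }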
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -855,7 +855,7 @@
 // Set the method data pointer for the current bcp.
 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
   assert(ProfileInterpreter, "must be profiling interpreter");
-  Label zero_continue;
+  Label set_mdp;
   push(rax);
   push(rbx);
 
@@ -863,21 +863,17 @@
   // Test MDO to avoid the call if it is NULL.
   movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
   testptr(rax, rax);
-  jcc(Assembler::zero, zero_continue);
-
+  jcc(Assembler::zero, set_mdp);
   // rbx: method
   // r13: bcp
   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
   // rax: mdi
-
+  // mdo is guaranteed to be non-zero here; we checked for it before the call.
   movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-  testptr(rbx, rbx);
-  jcc(Assembler::zero, zero_continue);
   addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
-  addptr(rbx, rax);
-  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
-
-  bind(zero_continue);
+  addptr(rax, rbx);
+  bind(set_mdp);
+  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
   pop(rbx);
   pop(rax);
 }
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -390,7 +390,7 @@
 //
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
-void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek, TRAPS) {
+void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
   // Here is the register state during an interpreted call,
   // as set up by generate_method_handle_interpreter_entry():
   // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
@@ -451,8 +451,9 @@
       // exception.  Since we use a C2I adapter to set up the
       // interpreter state, arguments are expected in compiler
       // argument registers.
-      methodHandle mh(raise_exception_method());
-      address c2i_entry = methodOopDesc::make_adapters(mh, CHECK);
+      assert(raise_exception_method(), "must be set");
+      address c2i_entry = raise_exception_method()->get_c2i_entry();
+      assert(c2i_entry, "method must be linked");
 
       const Register rdi_pc = rax;
       __ pop(rdi_pc);  // caller PC
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1367,15 +1367,9 @@
     if (ProfileInterpreter) {
       // We have decided to profile this method in the interpreter
       __ bind(profile_method);
-
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true);
-
-      __ movptr(rbx, Address(rbp, method_offset));   // restore methodOop
-      __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
-      __ test_method_data_pointer(rax, profile_method_continue);
-      __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
+      __ get_method(rbx);
       __ jmp(profile_method_continue);
     }
     // Handle overflow of counter and compile method
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1383,20 +1383,9 @@
     if (ProfileInterpreter) {
       // We have decided to profile this method in the interpreter
       __ bind(profile_method);
-
-      __ call_VM(noreg,
-                 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
-                 r13, true);
-
-      __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
-      __ movptr(rax, Address(rbx,
-                             in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-                rax);
-      __ test_method_data_pointer(rax, profile_method_continue);
-      __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-              rax);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
+      __ get_method(rbx);
       __ jmp(profile_method_continue);
     }
     // Handle overflow of counter and compile method
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1665,16 +1665,9 @@
     if (ProfileInterpreter) {
       // Out-of-line code to allocate method data oop.
       __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
       __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
-      __ movptr(rcx, Address(rbp, method_offset));
-      __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
-      __ test_method_data_pointer(rcx, dispatch);
-      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
-      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
-      __ addptr(rcx, rax);
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
+      __ set_method_data_pointer_for_bcp();
       __ jmp(dispatch);
     }
 
@@ -3203,10 +3196,12 @@
   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
 
+  const Register thread = rcx;
+  if (UseTLAB || allow_shared_alloc) {
+    __ get_thread(thread);
+  }
+
   if (UseTLAB) {
-    const Register thread = rcx;
-
-    __ get_thread(thread);
     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
     __ lea(rbx, Address(rax, rdx, Address::times_1));
     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
@@ -3247,6 +3242,8 @@
 
     // if someone beat us on the allocation, try again, otherwise continue
     __ jcc(Assembler::notEqual, retry);
+
+    __ incr_allocated_bytes(thread, rdx, 0);
   }
 
   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
@@ -3256,12 +3253,12 @@
     __ decrement(rdx, sizeof(oopDesc));
     __ jcc(Assembler::zero, initialize_header);
 
-  // Initialize topmost object field, divide rdx by 8, check if odd and
-  // test if zero.
+    // Initialize topmost object field, divide rdx by 8, check if odd and
+    // test if zero.
     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
     __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
 
-  // rdx must have been multiple of 8
+    // rdx must have been multiple of 8
 #ifdef ASSERT
     // make sure rdx was multiple of 8
     Label L;
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1695,21 +1695,9 @@
     if (ProfileInterpreter) {
       // Out-of-line code to allocate method data oop.
       __ bind(profile_method);
-      __ call_VM(noreg,
-                 CAST_FROM_FN_PTR(address,
-                                  InterpreterRuntime::profile_method), r13);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
       __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
-      __ movptr(rcx, Address(rbp, method_offset));
-      __ movptr(rcx, Address(rcx,
-                             in_bytes(methodOopDesc::method_data_offset())));
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-                rcx);
-      __ test_method_data_pointer(rcx, dispatch);
-      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
-      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
-      __ addptr(rcx, rax);
-      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
-                rcx);
+      __ set_method_data_pointer_for_bcp();
       __ jmp(dispatch);
     }
 
@@ -3266,6 +3254,8 @@
 
     // if someone beat us on the allocation, try again, otherwise continue
     __ jcc(Assembler::notEqual, retry);
+
+    __ incr_allocated_bytes(r15_thread, rdx, 0);
   }
 
   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/linux/vm/decoder_linux.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "prims/jvm.h"
+#include "utilities/decoder.hpp"
+
+#include <cxxabi.h>
+
+bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
+  int   status;
+  char* result;
+  size_t size = (size_t)buflen;
+
+  // Don't pass buf to __cxa_demangle. If 'buf' is too small,
+  // __cxa_demangle will call the system "realloc" for additional memory,
+  // which may use a different malloc/realloc mechanism than the one that
+  // allocated 'buf'.
+  if ((result = abi::__cxa_demangle(symbol, NULL, NULL, &status)) != NULL) {
+    jio_snprintf(buf, buflen, "%s", result);
+    // call the C library's free
+    ::free(result);
+    return true;
+  }
+  return false;
+}
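
A stand-alone illustration of the __cxa_demangle contract relied on above: passing NULL for the output buffer makes the library allocate the result itself, which is why the code releases it with the C library's free instead of writing into 'buf' directly. The mangled name below is only a sample input:

    #include <cxxabi.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      int status = 0;
      // NULL buffer and length: let the library allocate the result.
      char* result = abi::__cxa_demangle("_ZN7Decoder8demangleEPKcPci",
                                         NULL, NULL, &status);
      if (result != NULL) {
        std::printf("%s\n", result);  // Decoder::demangle(char const*, char*, int)
        std::free(result);            // must be the C library's free
      }
      return 0;
    }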
--- a/src/os/linux/vm/os_linux.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/linux/vm/os_linux.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -59,6 +59,7 @@
 #include "services/attachListener.hpp"
 #include "services/runtimeService.hpp"
 #include "thread_linux.inline.hpp"
+#include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
 #include "utilities/growableArray.hpp"
@@ -114,6 +115,7 @@
 # include <link.h>
 # include <stdint.h>
 # include <inttypes.h>
+# include <sys/ioctl.h>
 
 #define MAX_PATH    (2 * K)
 
@@ -1608,10 +1610,9 @@
 
 const char* os::dll_file_extension() { return ".so"; }
 
-const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  return prop == NULL ? "/tmp" : prop;
-}
+// This must be hard-coded because it is the system's temporary
+// directory, not the java application's temp directory, a la java.io.tmpdir.
+const char* os::get_temp_directory() { return "/tmp"; }
 
 static bool file_exists(const char* filename) {
   struct stat statbuf;
@@ -1688,14 +1689,23 @@
   Dl_info dlinfo;
 
   if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) {
-    if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
-    if (offset) *offset = addr - (address)dlinfo.dli_saddr;
+    if (buf != NULL) {
+      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
+        jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
+      }
+    }
+    if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
     return true;
-  } else {
-    if (buf) buf[0] = '\0';
-    if (offset) *offset = -1;
-    return false;
+  } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
+    if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
+       dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
+       return true;
+    }
   }
+
+  if (buf != NULL) buf[0] = '\0';
+  if (offset != NULL) *offset = -1;
+  return false;
 }
 
 struct _address_to_library_name {
@@ -4423,6 +4433,15 @@
   return 1;
 }
 
+int os::socket_available(int fd, jint *pbytes) {
+  // Linux doc says EINTR not returned, unlike Solaris
+  int ret = ::ioctl(fd, FIONREAD, pbytes);
+
+  //%% note: ioctl can return 0 when successful; JVM_SocketAvailable
+  // is expected to return 0 on failure and 1 on success to the jdk.
+  return (ret < 0) ? 0 : 1;
+}
+
 // Map a block of memory.
 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
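
os::socket_available moves out of the inline header (next file) and into os_linux.cpp, taking the <sys/ioctl.h> dependency with it. FIONREAD stores the number of bytes readable without blocking into *pbytes; a stand-alone rendering of the same 0/1 contract, assuming fd is an open socket:

    #include <sys/ioctl.h>

    // 1 on success with *pbytes set to the readable byte count, 0 on
    // ioctl failure -- the convention JVM_SocketAvailable expects.
    static int socket_available(int fd, int* pbytes) {
      return (ioctl(fd, FIONREAD, pbytes) < 0) ? 0 : 1;
    }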
--- a/src/os/linux/vm/os_linux.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/linux/vm/os_linux.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -45,7 +45,6 @@
 #include <unistd.h>
 #include <sys/socket.h>
 #include <sys/poll.h>
-#include <sys/ioctl.h>
 #include <netdb.h>
 
 inline void* os::thread_local_storage_at(int index) {
@@ -268,16 +267,6 @@
   RESTARTABLE_RETURN_INT(::sendto(fd, buf, len, (unsigned int) flags, to, tolen));
 }
 
-inline int os::socket_available(int fd, jint *pbytes) {
-  // Linux doc says EINTR not returned, unlike Solaris
-  int ret = ::ioctl(fd, FIONREAD, pbytes);
-
-  //%% note ioctl can return 0 when successful, JVM_SocketAvailable
-  // is expected to return 0 on failure and 1 on success to the jdk.
-  return (ret < 0) ? 0 : 1;
-}
-
-
 inline int os::socket_shutdown(int fd, int howto){
   return ::shutdown(fd, howto);
 }
--- a/src/os/linux/vm/perfMemory_linux.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -635,7 +635,29 @@
     return -1;
   }
 
-  return fd;
+  // Verify that we have enough disk space for this file.
+  // We'll get random SIGBUS crashes on memory accesses if
+  // we don't.
+
+  for (size_t seekpos = 0; seekpos < size; seekpos += os::vm_page_size()) {
+    int zero_int = 0;
+    result = (int)os::seek_to_file_offset(fd, (jlong)(seekpos));
+    if (result == -1) break;
+    RESTARTABLE(::write(fd, &zero_int, 1), result);
+    if (result != 1) {
+      if (errno == ENOSPC) {
+        warning("Insufficient space for shared memory file:\n   %s\nTry using the -Djava.io.tmpdir= option to select an alternate temp location.\n", filename);
+      }
+      break;
+    }
+  }
+
+  if (result != -1) {
+    return fd;
+  } else {
+    RESTARTABLE(::close(fd), result);
+    return -1;
+  }
 }
 
 // open the shared memory file for the given user and vmid. returns
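
The new loop writes one zero byte per page so the filesystem commits backing blocks for the whole file up front; without that, a store through the later mmap'ed region can raise SIGBUS once the disk fills. The idiom in isolation (names are illustrative):

    #include <unistd.h>

    // Touch one byte per page: block allocation happens now, so ENOSPC
    // surfaces as a write error here instead of a SIGBUS later.
    static int reserve_file_pages(int fd, size_t size, size_t page_size) {
      for (size_t pos = 0; pos < size; pos += page_size) {
        char zero = 0;
        if (lseek(fd, (off_t)pos, SEEK_SET) == (off_t)-1) return -1;
        if (write(fd, &zero, 1) != 1) return -1;
      }
      return 0;
    }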
--- a/src/os/posix/launcher/java_md.c	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/posix/launcher/java_md.c	Fri Jan 21 13:03:13 2011 -0800
@@ -812,13 +812,10 @@
 
 #ifdef GAMMA
     {
-       /* gamma launcher uses JAVA_HOME or ALT_JAVA_HOME environment variable to find JDK/JRE */
-       char* java_home_var = getenv("ALT_JAVA_HOME");
+       /* gamma launcher uses JAVA_HOME environment variable to find JDK/JRE */
+       char* java_home_var = getenv("JAVA_HOME");
        if (java_home_var == NULL) {
-          java_home_var = getenv("JAVA_HOME");
-       }
-       if (java_home_var == NULL) {
-          printf("JAVA_HOME or ALT_JAVA_HOME must point to a valid JDK/JRE to run gamma\n");
+          printf("JAVA_HOME must point to a valid JDK/JRE to run gamma\n");
           return JNI_FALSE;
        }
        snprintf(buf, bufsize, "%s", java_home_var);
@@ -1837,7 +1834,7 @@
     if (pthread_create(&tid, &attr, (void *(*)(void*))continuation, (void*)args) == 0) {
       void * tmp;
       pthread_join(tid, &tmp);
-      rslt = (int)tmp;
+      rslt = (int)(intptr_t)tmp;
     } else {
      /*
       * Continue execution in current thread if for some reason (e.g. out of
@@ -1855,7 +1852,7 @@
     if (thr_create(NULL, stack_size, (void *(*)(void *))continuation, args, flags, &tid) == 0) {
       void * tmp;
       thr_join(tid, NULL, &tmp);
-      rslt = (int)tmp;
+      rslt = (int)(intptr_t)tmp;
     } else {
       /* See above. Continue in current thread if thr_create() failed */
       rslt = continuation(args);
--- a/src/os/posix/launcher/launcher.script	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/posix/launcher/launcher.script	Fri Jan 21 13:03:13 2011 -0800
@@ -95,17 +95,21 @@
         ;;
 esac
 
+# Find out the absolute path to this script
+MYDIR=$(cd $(dirname $SCRIPT) && pwd)
+
+JDK=
 if [ "${ALT_JAVA_HOME}" = "" ]; then
-    if [ "${JAVA_HOME}" = "" ]; then
-	echo "Neither ALT_JAVA_HOME nor JAVA_HOME is set. Aborting.";
-	exit 1;
-    else
-	JDK=${JAVA_HOME%%/jre};
-    fi
+    source ${MYDIR}/jdkpath.sh
 else 
     JDK=${ALT_JAVA_HOME%%/jre};
 fi
 
+if [ "${JDK}" = "" ]; then
+    echo "Failed to find JDK. ALT_JAVA_HOME is not set or ./jdkpath.sh is empty or not found."
+    exit 1
+fi
+
 # We will set the LD_LIBRARY_PATH as follows:
 #     o		$JVMPATH (directory portion only)
 #     o		$JRE/lib/$ARCH
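
The launcher now learns the JDK location from a generated jdkpath.sh next to the script when ALT_JAVA_HOME is unset. The contents of jdkpath.sh are not part of this change; presumably the build drops in a one-line assignment along these lines (hypothetical):

    # jdkpath.sh -- generated at build time; the launcher only needs JDK set
    JDK=/path/to/jdk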
--- a/src/os/solaris/dtrace/hotspot.d	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/solaris/dtrace/hotspot.d	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/solaris/vm/decoder_solaris.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "utilities/decoder.hpp"
+
+#include <demangle.h>
+
+bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
+  return !cplus_demangle(symbol, buf, (size_t)buflen);
+}
--- a/src/os/solaris/vm/os_solaris.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/solaris/vm/os_solaris.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,6 +57,7 @@
 #include "services/attachListener.hpp"
 #include "services/runtimeService.hpp"
 #include "thread_solaris.inline.hpp"
+#include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
 #include "utilities/growableArray.hpp"
@@ -79,6 +80,7 @@
 // put OS-includes here
 # include <dlfcn.h>
 # include <errno.h>
+# include <exception>
 # include <link.h>
 # include <poll.h>
 # include <pthread.h>
@@ -281,7 +283,7 @@
                is_error_reported(),
               "sp must be inside of selected thread stack");
 
-    thread->_self_raw_id = raw_id;  // mark for quick retrieval
+    thread->set_self_raw_id(raw_id);  // mark for quick retrieval
     _get_thread_cache[ index ] = thread;
   }
   return thread;
@@ -1474,6 +1476,13 @@
   return &allowdebug_blocked_sigs;
 }
 
+
+void _handle_uncaught_cxx_exception() {
+  VMError err("An uncaught C++ exception");
+  err.report_and_die();
+}
+
+
 // First crack at OS-specific initialization, from inside the new thread.
 void os::initialize_thread() {
   int r = thr_main() ;
@@ -1563,6 +1572,7 @@
    // use the dynamic check for T2 libthread.
 
   os::Solaris::init_thread_fpu_state();
+  std::set_terminate(_handle_uncaught_cxx_exception);
 }
 
 
@@ -1874,10 +1884,9 @@
 
 const char* os::dll_file_extension() { return ".so"; }
 
-const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  return prop == NULL ? "/tmp" : prop;
-}
+// This must be hard-coded because it is the system's temporary
+// directory, not the java application's temp directory, a la java.io.tmpdir.
+const char* os::get_temp_directory() { return "/tmp"; }
 
 static bool file_exists(const char* filename) {
   struct stat statbuf;
@@ -1969,27 +1978,42 @@
       Sym * info;
       if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
                        RTLD_DL_SYMENT)) {
-          if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
-          if (offset) *offset = addr - (address)dlinfo.dli_saddr;
-
-          // check if the returned symbol really covers addr
-          return ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr);
-      } else {
-          if (buf) buf[0] = '\0';
-          if (offset) *offset  = -1;
-          return false;
+        if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
+          if (buf != NULL) {
+            if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
+              jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
+          }
+          if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
+          return true;
+        }
       }
+      if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
+        if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
+          dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
+          return true;
+        }
+      }
+      if (buf != NULL) buf[0] = '\0';
+      if (offset != NULL) *offset  = -1;
+      return false;
   } else {
       // no, only dladdr is available
-      if(dladdr((void *)addr, &dlinfo)) {
-          if (buf) jio_snprintf(buf, buflen, dlinfo.dli_sname);
-          if (offset) *offset = addr - (address)dlinfo.dli_saddr;
+      if (dladdr((void *)addr, &dlinfo)) {
+        if (buf != NULL) {
+          if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
+            jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
+        }
+        if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
+        return true;
+      } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
+        if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
+          dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
           return true;
-      } else {
-          if (buf) buf[0] = '\0';
-          if (offset) *offset  = -1;
-          return false;
+        }
       }
+      if (buf != NULL) buf[0] = '\0';
+      if (offset != NULL) *offset  = -1;
+      return false;
   }
 }
 
@@ -5172,7 +5196,7 @@
   int o_delete = (oflag & O_DELETE);
   oflag = oflag & ~O_DELETE;
 
-  fd = ::open(path, oflag, mode);
+  fd = ::open64(path, oflag, mode);
   if (fd == -1) return -1;
 
   //If the open succeeded, the file might still be a directory
--- a/src/os/solaris/vm/thread_solaris.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/solaris/vm/thread_solaris.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,10 +53,10 @@
 
   uintptr_t raw = pd_raw_thread_id();
   int ix = pd_cache_index(raw);
-  Thread *Candidate = ThreadLocalStorage::_get_thread_cache[ix];
-  if (Candidate->_self_raw_id == raw) {
+  Thread* candidate = ThreadLocalStorage::_get_thread_cache[ix];
+  if (candidate->self_raw_id() == raw) {
     // hit
-    return Candidate;
+    return candidate;
   } else {
     return ThreadLocalStorage::get_thread_via_cache_slowly(raw, ix);
   }
--- a/src/os/windows/launcher/java_md.c	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/windows/launcher/java_md.c	Fri Jan 21 13:03:13 2011 -0800
@@ -22,6 +22,7 @@
  *
  */
 
+#include <ctype.h>
 #include <windows.h>
 #include <io.h>
 #include <process.h>
@@ -486,16 +487,62 @@
 
 #else /* ifndef GAMMA */
 
-    /* gamma launcher uses JAVA_HOME or ALT_JAVA_HOME environment variable to find JDK/JRE */
-    char* java_home_var = getenv("ALT_JAVA_HOME");
-    if (java_home_var == NULL) {
-       java_home_var = getenv("JAVA_HOME");
+    char env[MAXPATHLEN + 1];
+
+    /* gamma launcher uses ALT_JAVA_HOME environment variable or jdkpath.txt file to find JDK/JRE */
+
+    if (getenv("ALT_JAVA_HOME") != NULL) {
+       snprintf(buf, bufsize, "%s", getenv("ALT_JAVA_HOME"));
     }
-    if (java_home_var == NULL) {
-       printf("JAVA_HOME or ALT_JAVA_HOME must point to a valid JDK/JRE to run gamma\n");
-       return JNI_FALSE;
+    else {
+       char path[MAXPATHLEN + 1];
+       char* p;
+       int len;
+       FILE* fp;
+
+       // find the path to the current executable
+       len = GetModuleFileName(NULL, path, MAXPATHLEN + 1);
+       if (len == 0 || len > MAXPATHLEN) {
+          printf("Could not get directory of current executable.");
+          return JNI_FALSE;
+       }
+       // remove last path component ("hotspot.exe")
+       p = strrchr(path, '\\');
+       if (p == NULL) {
+          printf("Could not parse directory of current executable.\n");
+          return JNI_FALSE;
+       }
+       *p = '\0';
+
+       // open jdkpath.txt and read JAVA_HOME from it
+       if (strlen(path) + strlen("\\jdkpath.txt") + 1 >= MAXPATHLEN) {
+          printf("Path too long: %s\n", path);
+          return JNI_FALSE;
+       }
+       strcat(path, "\\jdkpath.txt");
+       fp = fopen(path, "r");
+       if (fp == NULL) {
+          printf("Could not open file %s to get path to JDK.\n", path);
+          return JNI_FALSE;
+       }
+
+       if (fgets(buf, bufsize, fp) == NULL) {
+          printf("Could not read from file %s to get path to JDK.\n", path);
+          fclose(fp);
+          return JNI_FALSE;
+       }
+       // trim the buffer
+       p = buf + strlen(buf) - 1;
+       while (isspace(*p)) {
+          *p = '\0';
+          p--;
+       }
+       fclose(fp);
     }
-    snprintf(buf, bufsize, "%s", java_home_var);
+
+    _snprintf(env, MAXPATHLEN, "JAVA_HOME=%s", buf);
+    _putenv(env);
+
     return JNI_TRUE;
 #endif /* ifndef GAMMA */
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/decoder_windows.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "prims/jvm.h"
+#include "utilities/decoder.hpp"
+
+HMODULE                   Decoder::_dbghelp_handle = NULL;
+bool                      Decoder::_can_decode_in_vm = false;
+pfn_SymGetSymFromAddr64   Decoder::_pfnSymGetSymFromAddr64 = NULL;
+pfn_UndecorateSymbolName  Decoder::_pfnUndecorateSymbolName = NULL;
+
+void Decoder::initialize() {
+  if (!_initialized) {
+    _initialized = true;
+
+    HMODULE handle = ::LoadLibrary("dbghelp.dll");
+    if (!handle) {
+      _decoder_status = helper_not_found;
+      return;
+    }
+
+    _dbghelp_handle = handle;
+
+    pfn_SymSetOptions _pfnSymSetOptions = (pfn_SymSetOptions)::GetProcAddress(handle, "SymSetOptions");
+    pfn_SymInitialize _pfnSymInitialize = (pfn_SymInitialize)::GetProcAddress(handle, "SymInitialize");
+    _pfnSymGetSymFromAddr64 = (pfn_SymGetSymFromAddr64)::GetProcAddress(handle, "SymGetSymFromAddr64");
+    _pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName");
+
+    if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) {
+      _pfnSymGetSymFromAddr64 = NULL;
+      _pfnUndecorateSymbolName = NULL;
+      ::FreeLibrary(handle);
+      _dbghelp_handle = NULL;
+      _decoder_status = helper_func_error;
+      return;
+    }
+
+    _pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS);
+    if (!_pfnSymInitialize(GetCurrentProcess(), NULL, TRUE)) {
+      _pfnSymGetSymFromAddr64 = NULL;
+      _pfnUndecorateSymbolName = NULL;
+      ::FreeLibrary(handle);
+      _dbghelp_handle = NULL;
+      _decoder_status = helper_init_error;
+      return;
+    }
+
+    // find out if jvm.dll contains private symbols, by decoding
+    // the current function and comparing the result
+    address addr = (address)Decoder::initialize;
+    char buf[MAX_PATH];
+    if (decode(addr, buf, sizeof(buf), NULL) == no_error) {
+      _can_decode_in_vm = !strcmp(buf, "Decoder::initialize");
+    }
+  }
+}
+
+void Decoder::uninitialize() {
+  assert(_initialized, "Decoder not yet initialized");
+  _pfnSymGetSymFromAddr64 = NULL;
+  _pfnUndecorateSymbolName = NULL;
+  if (_dbghelp_handle != NULL) {
+    ::FreeLibrary(_dbghelp_handle);
+  }
+  _initialized = false;
+}
+
+bool Decoder::can_decode_C_frame_in_vm() {
+  initialize();
+  return _can_decode_in_vm;
+}
+
+
+Decoder::decoder_status Decoder::decode(address addr, char *buf, int buflen, int *offset) {
+  assert(_initialized, "Decoder not yet initialized");
+  if (_pfnSymGetSymFromAddr64 != NULL) {
+    PIMAGEHLP_SYMBOL64 pSymbol;
+    char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
+    pSymbol = (PIMAGEHLP_SYMBOL64)symbolInfo;
+    pSymbol->MaxNameLength = MAX_PATH;
+    pSymbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
+    DWORD64 displacement;
+    if (_pfnSymGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
+      if (buf != NULL) {
+        if (!demangle(pSymbol->Name, buf, buflen)) {
+          jio_snprintf(buf, buflen, "%s", pSymbol->Name);
+        }
+      }
+      if (offset != NULL) *offset = (int)displacement;
+      return no_error;
+    }
+  }
+  return helper_not_found;
+}
+
+bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
+  assert(_initialized, "Decoder not yet initialized");
+  return _pfnUndecorateSymbolName != NULL &&
+         _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
+}
+
--- a/src/os/windows/vm/os_windows.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os/windows/vm/os_windows.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,6 +62,7 @@
 #include "services/attachListener.hpp"
 #include "services/runtimeService.hpp"
 #include "thread_windows.inline.hpp"
+#include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
 #include "utilities/growableArray.hpp"
@@ -1043,9 +1044,9 @@
     return 0;
 }
 
+// This must be hard-coded because it is the system's temporary
+// directory, not the java application's temp directory, a la java.io.tmpdir.
 const char* os::get_temp_directory() {
-  const char *prop = Arguments::get_property("java.io.tmpdir");
-  if (prop != 0) return prop;
   static char path_buf[MAX_PATH];
   if (GetTempPath(MAX_PATH, path_buf)>0)
     return path_buf;
@@ -1365,12 +1366,11 @@
 
 bool os::dll_address_to_function_name(address addr, char *buf,
                                       int buflen, int *offset) {
-  // Unimplemented on Windows - in order to use SymGetSymFromAddr(),
-  // we need to initialize imagehlp/dbghelp, then load symbol table
-  // for every module. That's too much work to do after a fatal error.
-  // For an example on how to implement this function, see 1.4.2.
-  if (offset)  *offset  = -1;
-  if (buf) buf[0] = '\0';
+  if (Decoder::decode(addr, buf, buflen, offset) == Decoder::no_error) {
+    return true;
+  }
+  if (offset != NULL)  *offset  = -1;
+  if (buf != NULL) buf[0] = '\0';
   return false;
 }
 
@@ -1711,14 +1711,11 @@
   buf[0] = '\0';
   if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) {
      // Support for the gamma launcher. Check for an
-     // ALT_JAVA_HOME or JAVA_HOME environment variable
+     // JAVA_HOME environment variable
      // and fix up the path so it looks like
      // libjvm.so is installed there (append a fake suffix
      // hotspot/libjvm.so).
-     char* java_home_var = ::getenv("ALT_JAVA_HOME");
-     if (java_home_var == NULL) {
-        java_home_var = ::getenv("JAVA_HOME");
-     }
+     char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
 
         strncpy(buf, java_home_var, buflen);
@@ -2007,6 +2004,16 @@
   int   number;
 };
 
+// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
+// C++ compiler contain this error code. Because this is a compiler-generated
+// error, the code is not listed in the Win32 API header files.
+// The code is actually a cryptic mnemonic device, with the initial "E"
+// standing for "exception" and the final 3 bytes (0x6D7363) representing the
+// ASCII values of "msc".
+
+#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
+
+
 struct siglabel exceptlabels[] = {
     def_excpt(EXCEPTION_ACCESS_VIOLATION),
     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
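
The mnemonic is easy to verify: the low three bytes of the code are the ASCII values of 'm', 's' and 'c'. As a compile-time check (C++11 static_assert, for illustration only):

    static_assert((0xE06D7363 & 0xFFFFFF) ==
                  (('m' << 16) | ('s' << 8) | 'c'),
                  "the low 3 bytes of the VC++ exception code spell msc");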
@@ -2031,6 +2038,7 @@
     def_excpt(EXCEPTION_INVALID_DISPOSITION),
     def_excpt(EXCEPTION_GUARD_PAGE),
     def_excpt(EXCEPTION_INVALID_HANDLE),
+    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
     NULL, 0
 };
 
@@ -2264,7 +2272,6 @@
     }
   }
 
-
   if (t != NULL && t->is_Java_thread()) {
     JavaThread* thread = (JavaThread*) t;
     bool in_java = thread->thread_state() == _thread_in_Java;
@@ -2468,8 +2475,9 @@
       } // switch
     }
 #ifndef _WIN64
-    if ((thread->thread_state() == _thread_in_Java) ||
-        (thread->thread_state() == _thread_in_native) )
+    if (((thread->thread_state() == _thread_in_Java) ||
+        (thread->thread_state() == _thread_in_native)) &&
+        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
     {
       LONG result=Handle_FLT_Exception(exceptionInfo);
       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
@@ -2493,6 +2501,7 @@
       case EXCEPTION_ILLEGAL_INSTRUCTION_2:
       case EXCEPTION_INT_OVERFLOW:
       case EXCEPTION_INT_DIVIDE_BY_ZERO:
+      case EXCEPTION_UNCAUGHT_CXX_EXCEPTION:
       {  report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                        exceptionInfo->ContextRecord);
       }
--- a/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,8 @@
 inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
 inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
 
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
   intptr_t rv;
   __asm__ volatile(
--- a/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,11 +100,6 @@
   return exchange_value;
 }
 
-extern "C" {
-  // defined in linux_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-}
-
 #ifdef AMD64
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
@@ -164,9 +159,9 @@
   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
 }
 
-#else
-//inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-//inline void Atomic::store  (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
 
 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
   return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
@@ -189,6 +184,12 @@
   return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
+extern "C" {
+  // defined in linux_x86.s
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+}
+
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value) {
   return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
 }
@@ -200,6 +201,21 @@
 inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value) {
   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
 }
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
 #endif // AMD64
 
 #endif // OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_INLINE_HPP
--- a/src/os_cpu/linux_x86/vm/linux_x86_32.s	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/linux_x86/vm/linux_x86_32.s	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2004, 2007, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
 	.globl _mmx_Copy_arrayof_conjoint_jshorts
 
         .globl _Atomic_cmpxchg_long
+        .globl _Atomic_move_long
 
 	.text
 
@@ -653,3 +654,15 @@
         popl     %ebx
         ret
 
+
+        # Support for jlong Atomic::load and Atomic::store.
+        # void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
+        .p2align 4,,15
+	.type    _Atomic_move_long,@function
+_Atomic_move_long:
+        movl     4(%esp), %eax   # src
+        fildll    (%eax)
+        movl     8(%esp), %eax   # dest
+        fistpll   (%eax)
+        ret
+
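
On IA-32 a plain 64-bit copy compiles to two 32-bit moves, which a concurrent reader can observe half-done. fildll/fistpll instead move all eight bytes through the x87 stack in single memory accesses, which the architecture guarantees atomic for aligned 8-byte operands. A GCC inline-asm rendering of the same routine (an equivalent sketch, not the code above):

    // One 64-bit x87 load and one 64-bit store, each a single (atomic
    // when 8-byte aligned) memory access -- no tearing between halves.
    static void atomic_move_long(const volatile long long* src,
                                 volatile long long* dst) {
      __asm__ volatile ("fildll %1\n\t"
                        "fistpll %0"
                        : "=m" (*dst)
                        : "m" (*src)
                        : "memory");
    }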
--- a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 #define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
 
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "vm_version_x86.hpp"
 
@@ -64,11 +65,11 @@
 inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
 inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
 inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
 inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
 inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
 inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
 inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
 inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
 
@@ -79,11 +80,11 @@
 inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
 inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
 inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
 inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
 
@@ -178,7 +179,7 @@
                           : "0" (v), "r" (p)
                           : "memory");
 #else
-  *p = v; fence();
+  release_store(p, v); fence();
 #endif // AMD64
 }
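
x86's strong ordering is why load_acquire and release_store remain plain loads and stores for 32-bit-and-narrower types; jlong is singled out only because a 64-bit access on a 32-bit build is otherwise two instructions. The hazard the Atomic routing removes, in miniature (illustrative only):

    // Compiled as two 32-bit loads on IA-32: a reader racing a writer can
    // see the high half of one value and the low half of another.
    volatile long long shared_value;

    long long read_unsafely() {
      return shared_value;   // may tear without Atomic::load
    }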
 
--- a/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,14 +35,12 @@
 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
 inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
 inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
 inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
 
 inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
 inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
 inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
@@ -54,8 +52,49 @@
 inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
 inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
 
+
+#ifdef _LP64
+
+inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
+inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
 inline jlong Atomic::load(volatile jlong* src) { return *src; }
 
+#else
+
+extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst);
+extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
+
+inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
+#ifdef COMPILER2
+  // Compiler2 does not support v8; it is used only on v9.
+  assert(VM_Version::v9_instructions_work(), "only supported on v9");
+  _Atomic_move_long_v9(src, dst);
+#else
+  // The branch is cheaper than the emulated LDD.
+  if (VM_Version::v9_instructions_work()) {
+    _Atomic_move_long_v9(src, dst);
+  } else {
+    _Atomic_move_long_v8(src, dst);
+  }
+#endif
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif
+
 #ifdef _GNU_SOURCE
 
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
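
With the _LP64 split above in place, shared 64-bit fields can be read and written through Atomic::load/Atomic::store even on 32-bit SPARC, where a plain access compiles to two 32-bit memory operations and can be observed half-written. A usage sketch (the tick field is hypothetical):

    static volatile jlong tick;

    jlong sample()        { return Atomic::load(&tick); }   // one ldd (v8) or ldx (v9)
    void  bump(jlong now) { Atomic::store(now, &tick); }    // one std (v8) or stx (v9)
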
--- a/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,11 +77,11 @@
 inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
 inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
 inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
 inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
 inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
 inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
 inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
 inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
 
@@ -92,11 +92,11 @@
 inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
 inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
 inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
 inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
 
@@ -120,11 +120,11 @@
 inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { release_store(p, v); fence(); }
 inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store(p, v); fence(); }
 inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }
 
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -585,6 +585,13 @@
   sigaddset(&newset, sig);
   sigprocmask(SIG_UNBLOCK, &newset, NULL);
 
+  // Determine which sort of error to throw.  Out of swap may signal
+  // on the thread stack, which could get a mapping error when touched.
+  address addr = (address) info->si_addr;
+  if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) {
+    vm_exit_out_of_memory(0, "Out of swap space to map in thread stack.");
+  }
+
   VMError err(t, sig, pc, info, ucVoid);
   err.report_and_die();
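
The new check keys off three fields of siginfo_t: SIGBUS with BUS_OBJERR and si_errno == ENOMEM is how Solaris reports a mapping that could not be backed by swap when the faulting page was first touched. The same predicate as a standalone sketch (is_out_of_swap is a name invented here):

    #include <signal.h>
    #include <errno.h>

    static bool is_out_of_swap(int sig, const siginfo_t* info) {
      return sig == SIGBUS
          && info->si_code  == BUS_OBJERR
          && info->si_errno == ENOMEM;
    }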
 
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -152,6 +152,39 @@
         .nonvolatile
         .end
 
+  // Support for jlong Atomic::load and Atomic::store on v8.
+  //
+  // void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst)
+  //
+  // Arguments:
+  //      src:  O0
+  //      dest: O1
+  //
+  // Overwrites O2 and O3
+
+        .inline _Atomic_move_long_v8,2
+        .volatile
+        ldd     [%o0], %o2
+        std     %o2, [%o1]
+        .nonvolatile
+        .end
+
+  // Support for jlong Atomic::load and Atomic::store on v9.
+  //
+  // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
+  //
+  // Arguments:
+  //      src:  O0
+  //      dest: O1
+  //
+  // Overwrites O2
+
+        .inline _Atomic_move_long_v9,2
+        .volatile
+        ldx     [%o0], %o2
+        stx     %o2, [%o1]
+        .nonvolatile
+        .end
 
   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
   //
--- a/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,14 +151,22 @@
   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
 }
 
-extern "C" void _Atomic_load_long(volatile jlong* src, volatile jlong* dst);
+extern "C" void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
 
 inline jlong Atomic::load(volatile jlong* src) {
   volatile jlong dest;
-  _Atomic_load_long(src, &dest);
+  _Atomic_move_long(src, &dest);
   return dest;
 }
 
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
 #endif // AMD64
 
 #ifdef _GNU_SOURCE
--- a/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 #define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
 
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "vm_version_x86.hpp"
 
@@ -80,11 +81,11 @@
 inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
 inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
 inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
 inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
 inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
 inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
 inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
 inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
 
@@ -95,11 +96,11 @@
 inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
 inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
 inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
 inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
 
@@ -123,11 +124,11 @@
 inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { release_store(p, v); fence(); }
 inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store(p, v); fence(); }
 inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
 inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }
 
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -742,6 +742,13 @@
   sigaddset(&newset, sig);
   sigprocmask(SIG_UNBLOCK, &newset, NULL);
 
+  // Determine which sort of error to throw.  Out of swap may signal
+  // on the thread stack, which could get a mapping error when touched.
+  address addr = (address) info->si_addr;
+  if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) {
+    vm_exit_out_of_memory(0, "Out of swap space to map in thread stack.");
+  }
+
   VMError err(t, sig, pc, info, ucVoid);
   err.report_and_die();
 
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -104,8 +104,9 @@
       popl     %ebx
       .end
 
-  // Support for void Atomic::load(volatile jlong* src, volatile jlong* dest).
-      .inline _Atomic_load_long,2
+  // Support for jlong Atomic::load and Atomic::store.
+  // void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
+      .inline _Atomic_move_long,2
       movl     0(%esp), %eax   // src
       fildll    (%eax)
       movl     4(%esp), %eax   // dest
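
The x87 unit is used here precisely because fildll/fistpll move all 64 bits in a single memory access. A plain C++ store of a 64-bit value on 32-bit x86 is two 32-bit stores, so a concurrent reader can see the halves of two different values. An illustrative sketch of the hazard being avoided (not HotSpot code):

    // A naive 64-bit store on 32-bit x86: two 32-bit stores, not atomic.
    void naive_store(volatile long long* p, long long v) {
      volatile int* w = (volatile int*)(void*)p;
      w[0] = (int)v;           // low word lands first...
      w[1] = (int)(v >> 32);   // ...high word later; a reader in between sees a torn value
    }
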
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2004, 2007, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2004, 2009, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,10 +137,10 @@
   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
 }
 
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
 #else // !AMD64
 
-//inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-//inline void Atomic::store  (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
   int mp = os::is_MP();
   __asm {
@@ -254,6 +254,33 @@
 inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value) {
   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
 }
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  volatile jlong* pdest = &dest;
+  __asm {
+    mov eax, src
+    fild     qword ptr [eax]
+    mov eax, pdest
+    fistp    qword ptr [eax]
+  }
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  volatile jlong* src = &store_value;
+  __asm {
+    mov eax, src
+    fild     qword ptr [eax]
+    mov eax, dest
+    fistp    qword ptr [eax]
+  }
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  Atomic::store(store_value, (volatile jlong*)dest);
+}
+
 #endif // AMD64
 
 #pragma warning(default: 4035) // Enables warnings reporting missing return statement
--- a/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
 #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
 
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "vm_version_x86.hpp"
 
@@ -65,11 +66,11 @@
 inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
 inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
 inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
 inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
 inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
 inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
 inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
 inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
 
@@ -80,11 +81,11 @@
 inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
 inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
 inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
 inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
 inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
 
@@ -188,7 +189,7 @@
 #endif // AMD64
 }
 
-inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { release_store(p, v); fence(); }
 
 inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
 inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
--- a/src/share/tools/ProjectCreator/BuildConfig.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/tools/ProjectCreator/BuildConfig.java	Fri Jan 21 13:03:13 2011 -0800
@@ -22,8 +22,11 @@
  *
  */
 
-import java.util.*;
 import java.io.File;
+import java.util.Enumeration;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.Vector;
 
 class BuildConfig {
     Hashtable vars;
@@ -57,7 +60,6 @@
 
         // ones mentioned above were needed to expand format
         String buildBase = expandFormat(getFieldString(null, "BuildBase"));
-        String jdkDir =  getFieldString(null, "JdkTargetRoot");
         String sourceBase = getFieldString(null, "SourceBase");
         String outDir = buildBase;
 
@@ -65,7 +67,7 @@
         put("OutputDir", outDir);
         put("SourceBase", sourceBase);
         put("BuildBase", buildBase);
-        put("OutputDll", jdkDir + Util.sep + outDll);
+        put("OutputDll", outDir + Util.sep + outDll);
 
         context = new String [] {flavourBuild, flavour, build, null};
     }
@@ -537,68 +539,75 @@
    }
 }
 
-class C1DebugConfig extends GenericDebugConfig {
+abstract class GenericDebugNonKernelConfig extends GenericDebugConfig {
+    protected void init(Vector includes, Vector defines) {
+        super.init(includes, defines);
+        getCI().getAdditionalNonKernelLinkerFlags(getV("LinkerFlags"));
+    }
+}
+
+class C1DebugConfig extends GenericDebugNonKernelConfig {
     String getOptFlag() {
         return getCI().getNoOptFlag();
     }
 
     C1DebugConfig() {
-        initNames("compiler1", "debug", "fastdebug\\jre\\bin\\client\\jvm.dll");
+        initNames("compiler1", "debug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
 
-class C1FastDebugConfig extends GenericDebugConfig {
+class C1FastDebugConfig extends GenericDebugNonKernelConfig {
     String getOptFlag() {
         return getCI().getOptFlag();
     }
 
     C1FastDebugConfig() {
-        initNames("compiler1", "fastdebug", "fastdebug\\jre\\bin\\client\\jvm.dll");
+        initNames("compiler1", "fastdebug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
 
-class C2DebugConfig extends GenericDebugConfig {
+class C2DebugConfig extends GenericDebugNonKernelConfig {
     String getOptFlag() {
         return getCI().getNoOptFlag();
     }
 
     C2DebugConfig() {
-        initNames("compiler2", "debug", "fastdebug\\jre\\bin\\server\\jvm.dll");
+        initNames("compiler2", "debug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
 
-class C2FastDebugConfig extends GenericDebugConfig {
+class C2FastDebugConfig extends GenericDebugNonKernelConfig {
     String getOptFlag() {
         return getCI().getOptFlag();
     }
 
     C2FastDebugConfig() {
-        initNames("compiler2", "fastdebug", "fastdebug\\jre\\bin\\server\\jvm.dll");
+        initNames("compiler2", "fastdebug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
 
-class TieredDebugConfig extends GenericDebugConfig {
+class TieredDebugConfig extends GenericDebugNonKernelConfig {
     String getOptFlag() {
         return getCI().getNoOptFlag();
     }
 
     TieredDebugConfig() {
-        initNames("tiered", "debug", "fastdebug\\jre\\bin\\server\\jvm.dll");
+        initNames("tiered", "debug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
 
-class TieredFastDebugConfig extends GenericDebugConfig {
+class TieredFastDebugConfig extends GenericDebugNonKernelConfig {
     String getOptFlag() {
         return getCI().getOptFlag();
     }
 
     TieredFastDebugConfig() {
-        initNames("tiered", "fastdebug", "fastdebug\\jre\\bin\\server\\jvm.dll");
+        initNames("tiered", "fastdebug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
@@ -618,45 +627,45 @@
 
 class C1ProductConfig extends ProductConfig {
     C1ProductConfig() {
-        initNames("compiler1", "product", "jre\\bin\\client\\jvm.dll");
+        initNames("compiler1", "product", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
 
 class C2ProductConfig extends ProductConfig {
     C2ProductConfig() {
-        initNames("compiler2", "product", "jre\\bin\\server\\jvm.dll");
+        initNames("compiler2", "product", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
 
 class TieredProductConfig extends ProductConfig {
     TieredProductConfig() {
-        initNames("tiered", "product", "jre\\bin\\server\\jvm.dll");
+        initNames("tiered", "product", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
 
 
-class CoreDebugConfig extends GenericDebugConfig {
+class CoreDebugConfig extends GenericDebugNonKernelConfig {
     String getOptFlag() {
         return getCI().getNoOptFlag();
     }
 
     CoreDebugConfig() {
-        initNames("core", "debug", "fastdebug\\jre\\bin\\core\\jvm.dll");
+        initNames("core", "debug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
 
 
-class CoreFastDebugConfig extends GenericDebugConfig {
+class CoreFastDebugConfig extends GenericDebugNonKernelConfig {
     String getOptFlag() {
         return getCI().getOptFlag();
     }
 
     CoreFastDebugConfig() {
-        initNames("core", "fastdebug", "fastdebug\\jre\\bin\\core\\jvm.dll");
+        initNames("core", "fastdebug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
@@ -664,7 +673,7 @@
 
 class CoreProductConfig extends ProductConfig {
     CoreProductConfig() {
-        initNames("core", "product", "jre\\bin\\core\\jvm.dll");
+        initNames("core", "product", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
@@ -675,7 +684,7 @@
     }
 
     KernelDebugConfig() {
-        initNames("kernel", "debug", "fastdebug\\jre\\bin\\kernel\\jvm.dll");
+        initNames("kernel", "debug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
@@ -687,7 +696,7 @@
     }
 
     KernelFastDebugConfig() {
-        initNames("kernel", "fastdebug", "fastdebug\\jre\\bin\\kernel\\jvm.dll");
+        initNames("kernel", "fastdebug", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
@@ -695,7 +704,7 @@
 
 class KernelProductConfig extends ProductConfig {
     KernelProductConfig() {
-        initNames("kernel", "product", "jre\\bin\\kernel\\jvm.dll");
+        initNames("kernel", "product", "jvm.dll");
         init(getIncludes(), getDefines());
     }
 }
@@ -704,6 +713,7 @@
     abstract Vector getBaseLinkerFlags(String outDir, String outDll);
     abstract Vector getDebugCompilerFlags(String opt);
     abstract Vector getDebugLinkerFlags();
+    abstract void   getAdditionalNonKernelLinkerFlags(Vector rv);
     abstract Vector getProductCompilerFlags();
     abstract Vector getProductLinkerFlags();
     abstract String getOptFlag();
@@ -713,4 +723,14 @@
     void addAttr(Vector receiver, String attr, String value) {
         receiver.add(attr); receiver.add(value);
     }
+    void extAttr(Vector receiver, String attr, String value) {
+        int attr_pos = receiver.indexOf(attr);
+        if (attr_pos == -1) {
+          // If attr IS NOT present in the Vector - add it
+          receiver.add(attr); receiver.add(value);
+        } else {
+          // If attr IS present in the Vector - append value to it
+          receiver.set(attr_pos + 1, receiver.get(attr_pos + 1) + value);
+        }
+    }
 }
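
extAttr complements addAttr with add-or-append semantics over the flat attr/value Vector. The same rule rendered in C++ for clarity (an illustrative translation, not part of the changeset):

    #include <string>
    #include <vector>

    // Attribute list stored flat as [attr0, value0, attr1, value1, ...].
    void ext_attr(std::vector<std::string>& recv,
                  const std::string& attr, const std::string& value) {
      for (size_t i = 0; i + 1 < recv.size(); i += 2) {
        if (recv[i] == attr) {    // attr already present: append to its value
          recv[i + 1] += value;
          return;
        }
      }
      recv.push_back(attr);       // attr absent: add the pair
      recv.push_back(value);
    }

Note that the Java version's Vector.indexOf scans values as well as attribute names; the sketch sidesteps that by stepping over attribute slots only.
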
--- a/src/share/tools/ProjectCreator/WinGammaPlatform.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/tools/ProjectCreator/WinGammaPlatform.java	Fri Jan 21 13:03:13 2011 -0800
@@ -22,8 +22,15 @@
  *
  */
 
-import java.io.*;
-import java.util.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Enumeration;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.List;
+import java.util.TreeSet;
+import java.util.Vector;
 
 abstract class HsArgHandler extends ArgHandler {
     static final int STRING = 1;
@@ -345,11 +352,23 @@
         new ArgsParser(args,
                        new ArgRule[]
             {
-                new HsArgRule("-sourceBase",
-                              "SourceBase",
-                              "   (Did you set the HotSpotWorkSpace environment variable?)",
-                              HsArgHandler.STRING
-                              ),
+                new ArgRule("-sourceBase",
+                            new HsArgHandler() {
+                                public void handle(ArgIterator it) {
+                                   String cfg = getCfg(it.get());
+                                   if (nextNotKey(it)) {
+                                      String sb = (String) it.get();
+                                      if (sb.endsWith(Util.sep)) {
+                                         sb = sb.substring(0, sb.length() - 1);
+                                      }
+                                      BuildConfig.putField(cfg, "SourceBase", sb);
+                                      it.next();
+                                   } else {
+                                      empty("-sourceBase", null);
+                                   }
+                                }
+                            }
+                            ),
 
                 new HsArgRule("-buildBase",
                               "BuildBase",
@@ -512,7 +531,6 @@
                             new HsArgHandler() {
                                 public void handle(ArgIterator it) {
                                     if (nextNotKey(it)) {
-                                        String build = it.get();
                                         if (nextNotKey(it)) {
                                             String description = it.get();
                                             if (nextNotKey(it)) {
@@ -528,7 +546,28 @@
                                     empty(null,  "** Error: wrong number of args to -prelink");
                                 }
                             }
-                            )
+                            ),
+
+                new ArgRule("-postbuild",
+                            new HsArgHandler() {
+                                public void handle(ArgIterator it) {
+                                    if (nextNotKey(it)) {
+                                        if (nextNotKey(it)) {
+                                            String description = it.get();
+                                            if (nextNotKey(it)) {
+                                                String command = it.get();
+                                                BuildConfig.putField(null, "PostbuildDescription", description);
+                                                BuildConfig.putField(null, "PostbuildCommand", command);
+                                                it.next();
+                                                return;
+                                            }
+                                        }
+                                    }
+
+                                    empty(null,  "** Error: wrong number of args to -postbuild");
+                                }
+                            }
+                            ),
             },
                                        new ArgHandler() {
                                            public void handle(ArgIterator it) {
@@ -618,10 +657,6 @@
 
         public int compareTo(Object o) {
             FileInfo oo = (FileInfo)o;
-            // Don't squelch identical short file names where the full
-            // paths are different
-            if (!attr.shortName.equals(oo.attr.shortName))
-              return attr.shortName.compareTo(oo.attr.shortName);
             return full.compareTo(oo.full);
         }
 
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC6.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC6.java	Fri Jan 21 13:03:13 2011 -0800
@@ -260,6 +260,8 @@
         return rv;
     }
 
+    void getAdditionalNonKernelLinkerFlags(Vector rv) {}
+
     Vector getProductCompilerFlags() {
         Vector rv = new Vector();
 
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Fri Jan 21 13:03:13 2011 -0800
@@ -22,8 +22,13 @@
  *
  */
 
-import java.io.*;
-import java.util.*;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.TreeSet;
+import java.util.Vector;
 
 public class WinGammaPlatformVC7 extends WinGammaPlatform {
 
@@ -104,7 +109,9 @@
 
 
         boolean match(FileInfo fi) {
-            return fi.full.regionMatches(true, baseLen, dir, 0, dirLen);
+           int lastSlashIndex = fi.full.lastIndexOf('/');
+           String fullDir = fi.full.substring(0, lastSlashIndex);
+           return fullDir.endsWith(dir);
         }
     }
 
@@ -217,65 +224,41 @@
     //   - container filter just provides a container to group together real filters
     //   - real filter can select elements from the set according to some rule, put it into XML
     //     and remove from the list
-    Vector makeFilters(TreeSet files) {
+    Vector makeFilters(TreeSet<FileInfo> files) {
         Vector rv = new Vector();
         String sbase = Util.normalize(BuildConfig.getFieldString(null, "SourceBase")+"/src/");
 
-        ContainerFilter rt = new ContainerFilter("Runtime");
-        rt.add(new DirectoryFilter("share/vm/prims", sbase));
-        rt.add(new DirectoryFilter("share/vm/runtime", sbase));
-        rt.add(new DirectoryFilter("share/vm/oops", sbase));
-        rv.add(rt);
-
-        ContainerFilter gc = new ContainerFilter("GC");
-        gc.add(new DirectoryFilter("share/vm/memory", sbase));
-        gc.add(new DirectoryFilter("share/vm/gc_interface", sbase));
+        String currentDir = "";
+        DirectoryFilter container = null;
+        for (FileInfo fileInfo : files) {
 
-        ContainerFilter gc_impl = new ContainerFilter("Implementations");
-        gc_impl.add(new DirectoryFilter("CMS",
-                                        "share/vm/gc_implementation/concurrentMarkSweep",
-                                        sbase));
-        gc_impl.add(new DirectoryFilter("Parallel Scavenge",
-                                        "share/vm/gc_implementation/parallelScavenge",
-                                        sbase));
-        gc_impl.add(new DirectoryFilter("Shared",
-                                        "share/vm/gc_implementation/shared",
-                                        sbase));
-        // for all leftovers
-        gc_impl.add(new DirectoryFilter("Misc",
-                                        "share/vm/gc_implementation",
-                                        sbase));
-
-        gc.add(gc_impl);
-        rv.add(gc);
+           if (!fileInfo.full.startsWith(sbase)) {
+              continue;
+           }
 
-        rv.add(new DirectoryFilter("C1", "share/vm/c1", sbase));
-
-        rv.add(new DirectoryFilter("C2", "share/vm/opto", sbase));
-
-        ContainerFilter comp = new ContainerFilter("Compiler Common");
-        comp.add(new DirectoryFilter("share/vm/asm", sbase));
-        comp.add(new DirectoryFilter("share/vm/ci", sbase));
-        comp.add(new DirectoryFilter("share/vm/code", sbase));
-        comp.add(new DirectoryFilter("share/vm/compiler", sbase));
-        rv.add(comp);
+           int lastSlash = fileInfo.full.lastIndexOf('/');
+           String dir = fileInfo.full.substring(sbase.length(), lastSlash);
+           if (dir.equals("share/vm")) {
+              // skip files directly in share/vm - should only be precompiled.hpp which is handled below
+              continue;
+           }
+           if (!dir.equals(currentDir)) {
+              currentDir = dir;
+              if (container != null) {
+                 rv.add(container);
+              }
 
-        rv.add(new DirectoryFilter("Interpreter",
-                                   "share/vm/interpreter",
-                                   sbase));
-
-        ContainerFilter misc = new ContainerFilter("Misc");
-        misc.add(new DirectoryFilter("share/vm/libadt", sbase));
-        misc.add(new DirectoryFilter("share/vm/services", sbase));
-        misc.add(new DirectoryFilter("share/vm/utilities", sbase));
-        misc.add(new DirectoryFilter("share/vm/classfile", sbase));
-        rv.add(misc);
-
-        rv.add(new DirectoryFilter("os_cpu", sbase));
-
-        rv.add(new DirectoryFilter("cpu", sbase));
-
-        rv.add(new DirectoryFilter("os", sbase));
+              // remove "share/vm/" from names
+              String name = dir;
+              if (dir.startsWith("share/vm/")) {
+                 name = dir.substring("share/vm/".length(), dir.length());
+              }
+              container = new DirectoryFilter(name, dir, sbase);
+           }
+        }
+        if (container != null) {
+           rv.add(container);
+        }
 
         ContainerFilter generated = new ContainerFilter("Generated");
         ContainerFilter c1Generated = new ContainerFilter("C1");
@@ -397,7 +380,6 @@
                                          "Name", cfg,
                                          "ExcludedFromBuild", "TRUE"
                                      });
-                            tag("Tool", new String[] {"Name", "VCCLCompilerTool"});
                             endTag("FileConfiguration");
 
                         }
@@ -441,7 +423,11 @@
 
         tag("Tool",
             new String[] {
-                "Name", "VCPostBuildEventTool"
+               "Name", "VCPostBuildEventTool",
+                "Description", BuildConfig.getFieldString(null, "PostbuildDescription"),
+                // Caution: String.replace(String,String) is available from JDK 5 onwards only
+                "CommandLine", cfg.expandFormat(BuildConfig.getFieldString(null, "PostbuildCommand").replace
+                   ("\t", "&#x0D;&#x0A;"))
             }
             );
 
@@ -469,33 +455,6 @@
                 "Culture", "1033"
             }
             );
-        tag("Tool",
-            new String[] {
-              "Name", "VCWebServiceProxyGeneratorTool"
-            }
-            );
-
-        tag ("Tool",
-             new String[] {
-              "Name", "VCXMLDataGeneratorTool"
-             }
-             );
-
-        tag("Tool",
-            new String[] {
-              "Name", "VCWebDeploymentTool"
-            }
-            );
-        tag("Tool",
-             new String[] {
-            "Name", "VCManagedWrapperGeneratorTool"
-             }
-            );
-        tag("Tool",
-            new String[] {
-              "Name", "VCAuxiliaryManagedWrapperGeneratorTool"
-            }
-            );
 
         tag("Tool",
             new String[] {
@@ -597,7 +556,7 @@
         addAttr(rv, "PrecompiledHeaderFile", outDir+Util.sep+"vm.pch");
         addAttr(rv, "AssemblerListingLocation", outDir);
         addAttr(rv, "ObjectFile", outDir+Util.sep);
-        addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"vm.pdb");
+        addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"jvm.pdb");
         // Set /nologo option
         addAttr(rv, "SuppressStartupBanner", "TRUE");
         // Supersede the default /Tc or /Tp. 0 is compileAsDefault
@@ -631,17 +590,22 @@
         addAttr(rv, "AdditionalOptions",
                 "/export:JNI_GetDefaultJavaVMInitArgs " +
                 "/export:JNI_CreateJavaVM " +
+                "/export:JVM_FindClassFromBootLoader "+
                 "/export:JNI_GetCreatedJavaVMs "+
                 "/export:jio_snprintf /export:jio_printf "+
                 "/export:jio_fprintf /export:jio_vfprintf "+
-                "/export:jio_vsnprintf ");
+                "/export:jio_vsnprintf "+
+                "/export:JVM_GetVersionInfo "+
+                "/export:JVM_GetThreadStateNames "+
+                "/export:JVM_GetThreadStateValues "+
+                "/export:JVM_InitAgentProperties ");
         addAttr(rv, "AdditionalDependencies", "Wsock32.lib winmm.lib");
         addAttr(rv, "OutputFile", outDll);
         // Set /INCREMENTAL option. 1 is linkIncrementalNo
         addAttr(rv, "LinkIncremental", "1");
         addAttr(rv, "SuppressStartupBanner", "TRUE");
         addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
-        addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"vm.pdb");
+        addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"jvm.pdb");
         // Set /SUBSYSTEM option. 2 is subSystemWindows
         addAttr(rv, "SubSystem", "2");
         addAttr(rv, "BaseAddress", "0x8000000");
@@ -682,6 +646,11 @@
         return rv;
     }
 
+    void getAdditionalNonKernelLinkerFlags(Vector rv) {
+        extAttr(rv, "AdditionalOptions",
+                "/export:AsyncGetCallTrace ");
+    }
+
     void getProductCompilerFlags_common(Vector rv) {
         // Set /O2 option. 2 is optimizeMaxSpeed
         addAttr(rv, "Optimization", "2");
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC8.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC8.java	Fri Jan 21 13:03:13 2011 -0800
@@ -22,7 +22,7 @@
  *
  */
 
-import java.util.*;
+import java.util.Vector;
 
 public class WinGammaPlatformVC8 extends WinGammaPlatformVC7 {
 
@@ -41,6 +41,9 @@
         // Set /EHsc- option. 0 is cppExceptionHandlingNo
         addAttr(rv, "ExceptionHandling", "0");
 
+        // Enable multi-process builds
+        extAttr(rv, "AdditionalOptions", "/MP");
+
         return rv;
     }
 
--- a/src/share/tools/launcher/java.c	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/tools/launcher/java.c	Fri Jan 21 13:03:13 2011 -0800
@@ -275,6 +275,8 @@
                                jvmpath, sizeof(jvmpath),
                                original_argv);
 
+    printf("Using java runtime at: %s\n", jrepath);
+
     ifn.CreateJavaVM = 0;
     ifn.GetDefaultJavaVMInitArgs = 0;
 
--- a/src/share/tools/launcher/jli_util.c	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/tools/launcher/jli_util.c	Fri Jan 21 13:03:13 2011 -0800
@@ -1,3 +1,4 @@
+
 /*
  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -27,7 +28,7 @@
 #include "jli_util.h"
 
 #ifdef GAMMA
-#ifdef _WINDOWS
+#ifdef TARGET_OS_FAMILY_windows
 #define strdup _strdup
 #endif
 #endif
--- a/src/share/vm/c1/c1_Compilation.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/c1/c1_Compilation.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -245,7 +245,7 @@
 }
 
 
-void Compilation::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
+bool Compilation::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
   // Preinitialize the consts section to some large size:
   int locs_buffer_size = 20 * (relocInfo::length_limit + sizeof(relocInfo));
   char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
@@ -253,15 +253,20 @@
                                         locs_buffer_size / sizeof(relocInfo));
   code->initialize_consts_size(Compilation::desired_max_constant_size());
   // Call stubs + two deopt handlers (regular and MH) + exception handler
-  code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
-                              LIR_Assembler::exception_handler_size +
-                              2 * LIR_Assembler::deopt_handler_size);
+  int stub_size = (call_stub_estimate * LIR_Assembler::call_stub_size) +
+                   LIR_Assembler::exception_handler_size +
+                   (2 * LIR_Assembler::deopt_handler_size);
+  if (stub_size >= code->insts_capacity()) return false;
+  code->initialize_stubs_size(stub_size);
+  return true;
 }
 
 
 int Compilation::emit_code_body() {
   // emit code
-  setup_code_buffer(code(), allocator()->num_calls());
+  if (!setup_code_buffer(code(), allocator()->num_calls())) {
+    BAILOUT_("size requested greater than avail code buffer size", 0);
+  }
   code()->initialize_oop_recorder(env()->oop_recorder());
 
   _masm = new C1_MacroAssembler(code());
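
The signature change turns an unconditional initialize_stubs_size into a checked reservation: the caller treats an oversized stub estimate as a compilation bailout instead of overflowing the CodeBuffer later. The shape of the guard, with hypothetical constants standing in for the real LIR_Assembler sizes:

    enum { kCallStubSize = 28, kExceptionHandlerSize = 256, kDeoptHandlerSize = 64 };  // illustrative only

    bool reserve_stub_space(CodeBuffer* code, int call_stub_estimate) {
      int stub_size = call_stub_estimate * kCallStubSize
                    + kExceptionHandlerSize
                    + 2 * kDeoptHandlerSize;
      if (stub_size >= code->insts_capacity()) return false;  // caller bails out
      code->initialize_stubs_size(stub_size);
      return true;
    }
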
--- a/src/share/vm/c1/c1_Compilation.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/c1/c1_Compilation.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -192,7 +192,7 @@
     return desired_max_code_buffer_size() / 10;
   }
 
-  static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
+  static bool setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
 
   // timers
   static void print_timers();
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -319,24 +319,24 @@
 
       case Bytecodes::_tableswitch: {
         // set block for each case
-        Bytecode_tableswitch *switch_ = Bytecode_tableswitch_at(s.cur_bcp());
-        int l = switch_->length();
+        Bytecode_tableswitch sw(&s);
+        int l = sw.length();
         for (int i = 0; i < l; i++) {
-          make_block_at(cur_bci + switch_->dest_offset_at(i), current);
+          make_block_at(cur_bci + sw.dest_offset_at(i), current);
         }
-        make_block_at(cur_bci + switch_->default_offset(), current);
+        make_block_at(cur_bci + sw.default_offset(), current);
         current = NULL;
         break;
       }
 
       case Bytecodes::_lookupswitch: {
         // set block for each case
-        Bytecode_lookupswitch *switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
-        int l = switch_->number_of_pairs();
+        Bytecode_lookupswitch sw(&s);
+        int l = sw.number_of_pairs();
         for (int i = 0; i < l; i++) {
-          make_block_at(cur_bci + switch_->pair_at(i)->offset(), current);
+          make_block_at(cur_bci + sw.pair_at(i).offset(), current);
         }
-        make_block_at(cur_bci + switch_->default_offset(), current);
+        make_block_at(cur_bci + sw.default_offset(), current);
         current = NULL;
         break;
       }
@@ -1275,15 +1275,15 @@
 
 
 void GraphBuilder::table_switch() {
-  Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(method()->code() + bci());
-  const int l = switch_->length();
+  Bytecode_tableswitch sw(stream());
+  const int l = sw.length();
   if (CanonicalizeNodes && l == 1) {
     // total of 2 successors => use If instead of switch
     // Note: This code should go into the canonicalizer as soon as it
     //       can handle canonicalized forms that contain more than one node.
-    Value key = append(new Constant(new IntConstant(switch_->low_key())));
-    BlockBegin* tsux = block_at(bci() + switch_->dest_offset_at(0));
-    BlockBegin* fsux = block_at(bci() + switch_->default_offset());
+    Value key = append(new Constant(new IntConstant(sw.low_key())));
+    BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
+    BlockBegin* fsux = block_at(bci() + sw.default_offset());
     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
     ValueStack* state_before = is_bb ? copy_state_before() : NULL;
     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
@@ -1293,29 +1293,29 @@
     int i;
     bool has_bb = false;
     for (i = 0; i < l; i++) {
-      sux->at_put(i, block_at(bci() + switch_->dest_offset_at(i)));
-      if (switch_->dest_offset_at(i) < 0) has_bb = true;
+      sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
+      if (sw.dest_offset_at(i) < 0) has_bb = true;
     }
     // add default successor
-    sux->at_put(i, block_at(bci() + switch_->default_offset()));
+    sux->at_put(i, block_at(bci() + sw.default_offset()));
     ValueStack* state_before = has_bb ? copy_state_before() : NULL;
-    append(new TableSwitch(ipop(), sux, switch_->low_key(), state_before, has_bb));
+    append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
   }
 }
 
 
 void GraphBuilder::lookup_switch() {
-  Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(method()->code() + bci());
-  const int l = switch_->number_of_pairs();
+  Bytecode_lookupswitch sw(stream());
+  const int l = sw.number_of_pairs();
   if (CanonicalizeNodes && l == 1) {
     // total of 2 successors => use If instead of switch
     // Note: This code should go into the canonicalizer as soon as it
     //       can handle canonicalized forms that contain more than one node.
     // simplify to If
-    LookupswitchPair* pair = switch_->pair_at(0);
-    Value key = append(new Constant(new IntConstant(pair->match())));
-    BlockBegin* tsux = block_at(bci() + pair->offset());
-    BlockBegin* fsux = block_at(bci() + switch_->default_offset());
+    LookupswitchPair pair = sw.pair_at(0);
+    Value key = append(new Constant(new IntConstant(pair.match())));
+    BlockBegin* tsux = block_at(bci() + pair.offset());
+    BlockBegin* fsux = block_at(bci() + sw.default_offset());
     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
     ValueStack* state_before = is_bb ? copy_state_before() : NULL;
     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
@@ -1326,13 +1326,13 @@
     int i;
     bool has_bb = false;
     for (i = 0; i < l; i++) {
-      LookupswitchPair* pair = switch_->pair_at(i);
-      if (pair->offset() < 0) has_bb = true;
-      sux->at_put(i, block_at(bci() + pair->offset()));
-      keys->at_put(i, pair->match());
+      LookupswitchPair pair = sw.pair_at(i);
+      if (pair.offset() < 0) has_bb = true;
+      sux->at_put(i, block_at(bci() + pair.offset()));
+      keys->at_put(i, pair.match());
     }
     // add default successor
-    sux->at_put(i, block_at(bci() + switch_->default_offset()));
+    sux->at_put(i, block_at(bci() + sw.default_offset()));
     ValueStack* state_before = has_bb ? copy_state_before() : NULL;
     append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
   }
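
The recurring edit in this file (and in bcEscapeAnalyzer below) replaces the pointer-returning Bytecode_xxx_at accessors with value objects constructed over a stream or a method/bci pair, so pair_at and friends return by value as well. The resulting idiom, sketched for tableswitch (s is a BytecodeStream positioned at the opcode, as above):

    Bytecode_tableswitch sw(&s);
    for (int i = 0; i < sw.length(); i++) {
      int dest_bci = s.cur_bci() + sw.dest_offset_at(i);  // per-case branch target
      // ... record dest_bci as a block start ...
    }
    int default_bci = s.cur_bci() + sw.default_offset();  // default branch target
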
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1990,9 +1990,8 @@
 
   LIR_Opr reg = rlock_result(x, x->basic_type());
 
+  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
   if (x->is_volatile() && os::is_MP()) __ membar_acquire();
-  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
-  if (x->is_volatile() && os::is_MP()) __ membar();
 }
 
 
@@ -2014,6 +2013,7 @@
 
   if (x->is_volatile() && os::is_MP()) __ membar_release();
   put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
+  if (x->is_volatile() && os::is_MP()) __ membar();
 }
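
Taken together, the two hunks move the acquire barrier to after the volatile load (it had been emitted before the load, followed by a full membar) and add a trailing full membar after the volatile store instead. The intent corresponds roughly to the following standard C++ (illustrative, using std::atomic rather than HotSpot's LIR):

    #include <atomic>

    std::atomic<long long> cell;  // stand-in for the field reached via Unsafe

    long long get_volatile() {
      return cell.load(std::memory_order_acquire);         // load, then acquire
    }

    void put_volatile(long long v) {
      cell.store(v, std::memory_order_release);            // release before the store
      std::atomic_thread_fence(std::memory_order_seq_cst); // trailing full fence
    }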
 
 
--- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -369,7 +369,7 @@
   if (branch_bci != InvocationEntryBci) {
     // Compute destination bci
     address pc = method()->code_base() + branch_bci;
-    Bytecodes::Code branch = Bytecodes::code_at(pc, method());
+    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
     int offset = 0;
     switch (branch) {
       case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
@@ -659,14 +659,14 @@
 
 
 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
-  Bytecode_field* field_access = Bytecode_field_at(caller, bci);
+  Bytecode_field field_access(caller, bci);
   // This can be static or non-static field access
-  Bytecodes::Code code       = field_access->code();
+  Bytecodes::Code code       = field_access.code();
 
   // We must load class, initialize class and resolve the field
   FieldAccessInfo result; // initialize class if needed
   constantPoolHandle constants(THREAD, caller->constants());
-  LinkResolver::resolve_field(result, constants, field_access->index(), Bytecodes::java_code(code), false, CHECK_NULL);
+  LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
   return result.klass()();
 }
 
@@ -767,7 +767,7 @@
 
   Events::log("patch_code @ " INTPTR_FORMAT , caller_frame.pc());
 
-  Bytecodes::Code code = Bytecode_at(caller_method->bcp_from(bci))->java_code();
+  Bytecodes::Code code = caller_method()->java_code_at(bci);
 
 #ifndef PRODUCT
   // this is used by assertions in the access_field_patching_id
@@ -779,11 +779,11 @@
   Handle load_klass(THREAD, NULL);                // oop needed by load_klass_patching code
   if (stub_id == Runtime1::access_field_patching_id) {
 
-    Bytecode_field* field_access = Bytecode_field_at(caller_method, bci);
+    Bytecode_field field_access(caller_method, bci);
     FieldAccessInfo result; // initialize class if needed
-    Bytecodes::Code code = field_access->code();
+    Bytecodes::Code code = field_access.code();
     constantPoolHandle constants(THREAD, caller_method->constants());
-    LinkResolver::resolve_field(result, constants, field_access->index(), Bytecodes::java_code(code), false, CHECK);
+    LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
     patch_field_offset = result.field_offset();
 
     // If we're patching a field which is volatile then at compile it
@@ -811,36 +811,36 @@
         }
         break;
       case Bytecodes::_new:
-        { Bytecode_new* bnew = Bytecode_new_at(caller_method->bcp_from(bci));
-          k = caller_method->constants()->klass_at(bnew->index(), CHECK);
+        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
+          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
         }
         break;
       case Bytecodes::_multianewarray:
-        { Bytecode_multianewarray* mna = Bytecode_multianewarray_at(caller_method->bcp_from(bci));
-          k = caller_method->constants()->klass_at(mna->index(), CHECK);
+        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
+          k = caller_method->constants()->klass_at(mna.index(), CHECK);
         }
         break;
       case Bytecodes::_instanceof:
-        { Bytecode_instanceof* io = Bytecode_instanceof_at(caller_method->bcp_from(bci));
-          k = caller_method->constants()->klass_at(io->index(), CHECK);
+        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
+          k = caller_method->constants()->klass_at(io.index(), CHECK);
         }
         break;
       case Bytecodes::_checkcast:
-        { Bytecode_checkcast* cc = Bytecode_checkcast_at(caller_method->bcp_from(bci));
-          k = caller_method->constants()->klass_at(cc->index(), CHECK);
+        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
+          k = caller_method->constants()->klass_at(cc.index(), CHECK);
         }
         break;
       case Bytecodes::_anewarray:
-        { Bytecode_anewarray* anew = Bytecode_anewarray_at(caller_method->bcp_from(bci));
-          klassOop ek = caller_method->constants()->klass_at(anew->index(), CHECK);
+        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
+          klassOop ek = caller_method->constants()->klass_at(anew.index(), CHECK);
           k = Klass::cast(ek)->array_klass(CHECK);
         }
         break;
       case Bytecodes::_ldc:
       case Bytecodes::_ldc_w:
         {
-          Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method, bci);
-          k = cc->resolve_constant(CHECK);
+          Bytecode_loadconstant cc(caller_method, bci);
+          k = cc.resolve_constant(CHECK);
           assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant");
         }
         break;
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -233,6 +233,10 @@
 
   // compute size of arguments
   int arg_size = target->arg_size();
+  if (code == Bytecodes::_invokedynamic) {
+    assert(!target->is_static(), "receiver explicit in method");
+    arg_size--;  // implicit, not really on stack
+  }
   if (!target->is_loaded() && code == Bytecodes::_invokestatic) {
     arg_size--;
   }
@@ -250,6 +254,10 @@
     ArgumentMap arg = state._stack[i];
     skip_callee = !is_argument(arg) || !is_arg_stack(arg) || (directly_recursive && arg.is_singleton(i - arg_base));
   }
+  // For now we conservatively skip invokedynamic.
+  if (code == Bytecodes::_invokedynamic) {
+    skip_callee = true;
+  }
   if (skip_callee) {
     TRACE_BCEA(3, tty->print_cr("[EA] skipping method %s::%s", holder->name()->as_utf8(), target->name()->as_utf8()));
     for (i = 0; i < arg_size; i++) {
@@ -761,15 +769,15 @@
       case Bytecodes::_tableswitch:
         {
           state.spop();
-          Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(s.cur_bcp());
-          int len = switch_->length();
+          Bytecode_tableswitch sw(&s);
+          int len = sw.length();
           int dest_bci;
           for (int i = 0; i < len; i++) {
-            dest_bci = s.cur_bci() + switch_->dest_offset_at(i);
+            dest_bci = s.cur_bci() + sw.dest_offset_at(i);
             assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
             successors.push(_methodBlocks->block_containing(dest_bci));
           }
-          dest_bci = s.cur_bci() + switch_->default_offset();
+          dest_bci = s.cur_bci() + sw.default_offset();
           assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
           successors.push(_methodBlocks->block_containing(dest_bci));
           assert(s.next_bci() == limit_bci, "branch must end block");
@@ -779,15 +787,15 @@
       case Bytecodes::_lookupswitch:
         {
           state.spop();
-          Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
-          int len = switch_->number_of_pairs();
+          Bytecode_lookupswitch sw(&s);
+          int len = sw.number_of_pairs();
           int dest_bci;
           for (int i = 0; i < len; i++) {
-            dest_bci = s.cur_bci() + switch_->pair_at(i)->offset();
+            dest_bci = s.cur_bci() + sw.pair_at(i).offset();
             assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
             successors.push(_methodBlocks->block_containing(dest_bci));
           }
-          dest_bci = s.cur_bci() + switch_->default_offset();
+          dest_bci = s.cur_bci() + sw.default_offset();
           assert(_methodBlocks->is_block_start(dest_bci), "branch destination must start a block");
           successors.push(_methodBlocks->block_containing(dest_bci));
           fall_through = false;
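
The invokedynamic hunks above adjust the escape analyzer's view of the operand
stack: the signature of an invokedynamic target counts the receiver, but the
JVM never actually pushes one. A toy sketch of that accounting (the function
name and the declared_arg_size parameter are illustrative, not HotSpot API):

#include <cassert>

enum Code { invokestatic, invokevirtual, invokedynamic };

// How many stack slots the call actually consumes, given the argument
// count derived from the target method's signature.
int stack_arg_slots(Code code, int declared_arg_size, bool target_is_static) {
  int arg_size = declared_arg_size;
  if (code == invokedynamic) {
    assert(!target_is_static);  // receiver explicit in method ...
    arg_size--;                 // ... but not really on the stack
  }
  return arg_size;
}

int main() {
  assert(stack_arg_slots(invokedynamic, 3, false) == 2);
  assert(stack_arg_slots(invokevirtual,  3, false) == 3);
  return 0;
}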
--- a/src/share/vm/ci/ciEnv.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/ci/ciEnv.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -409,15 +409,15 @@
   } else {
     fail_type = _unloaded_ciinstance_klass;
   }
-  klassOop found_klass;
+  KlassHandle found_klass;
   if (!require_local) {
-    found_klass =
-      SystemDictionary::find_constrained_instance_or_array_klass(sym, loader,
-                                                                 KILL_COMPILE_ON_FATAL_(fail_type));
+    klassOop kls = SystemDictionary::find_constrained_instance_or_array_klass(
+        sym, loader, KILL_COMPILE_ON_FATAL_(fail_type));
+    found_klass = KlassHandle(THREAD, kls);
   } else {
-    found_klass =
-      SystemDictionary::find_instance_or_array_klass(sym, loader, domain,
-                                                     KILL_COMPILE_ON_FATAL_(fail_type));
+    klassOop kls = SystemDictionary::find_instance_or_array_klass(
+        sym, loader, domain, KILL_COMPILE_ON_FATAL_(fail_type));
+    found_klass = KlassHandle(THREAD, kls);
   }
 
   // If we fail to find an array klass, look again for its element type.
@@ -444,9 +444,9 @@
     }
   }
 
-  if (found_klass != NULL) {
+  if (found_klass() != NULL) {
     // Found it.  Build a CI handle.
-    return get_object(found_klass)->as_klass();
+    return get_object(found_klass())->as_klass();
   }
 
   if (require_local)  return NULL;
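
The hunk above stops carrying the looked-up klassOop as a raw pointer,
presumably so the value stays valid across a safepoint: a KlassHandle is
registered with the thread so the collector can update it. A toy model of the
handle interface (the GC-update machinery is assumed and omitted), showing in
particular the operator() behind the new found_klass() != NULL test:

#include <cassert>
#include <cstddef>

struct klassOopDesc { int id; };
typedef klassOopDesc* klassOop;

class KlassHandle {
  klassOop _value;  // in HotSpot this slot lives in a GC-visible handle area
 public:
  KlassHandle() : _value(NULL) {}
  explicit KlassHandle(klassOop k) : _value(k) {}
  klassOop operator()() const { return _value; }
};

int main() {
  klassOopDesc k = { 7 };
  KlassHandle found_klass;            // starts out empty
  assert(found_klass() == NULL);
  found_klass = KlassHandle(&k);      // wrap the raw lookup result
  assert(found_klass() != NULL && found_klass()->id == 7);
  return 0;
}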
--- a/src/share/vm/ci/ciMethod.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/ci/ciMethod.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,7 +144,7 @@
 
   Bytecodes::Code java_code_at_bci(int bci) {
     address bcp = code() + bci;
-    return Bytecodes::java_code_at(bcp);
+    return Bytecodes::java_code_at(NULL, bcp);
   }
   BCEscapeAnalyzer  *get_bcea();
   ciMethodBlocks    *get_method_blocks();
--- a/src/share/vm/ci/ciMethodBlocks.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/ci/ciMethodBlocks.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -175,15 +175,15 @@
       case Bytecodes::_tableswitch :
         {
           cur_block->set_control_bci(bci);
-          Bytecode_tableswitch* switch_ = Bytecode_tableswitch_at(s.cur_bcp());
-          int len = switch_->length();
+          Bytecode_tableswitch sw(&s);
+          int len = sw.length();
           ciBlock *dest;
           int dest_bci;
           for (int i = 0; i < len; i++) {
-            dest_bci = s.cur_bci() + switch_->dest_offset_at(i);
+            dest_bci = s.cur_bci() + sw.dest_offset_at(i);
             dest = make_block_at(dest_bci);
           }
-          dest_bci = s.cur_bci() + switch_->default_offset();
+          dest_bci = s.cur_bci() + sw.default_offset();
           make_block_at(dest_bci);
           if (s.next_bci() < limit_bci) {
             dest = make_block_at(s.next_bci());
@@ -194,15 +194,15 @@
       case Bytecodes::_lookupswitch:
         {
           cur_block->set_control_bci(bci);
-          Bytecode_lookupswitch* switch_ = Bytecode_lookupswitch_at(s.cur_bcp());
-          int len = switch_->number_of_pairs();
+          Bytecode_lookupswitch sw(&s);
+          int len = sw.number_of_pairs();
           ciBlock *dest;
           int dest_bci;
           for (int i = 0; i < len; i++) {
-            dest_bci = s.cur_bci() + switch_->pair_at(i)->offset();
+            dest_bci = s.cur_bci() + sw.pair_at(i).offset();
             dest = make_block_at(dest_bci);
           }
-          dest_bci = s.cur_bci() + switch_->default_offset();
+          dest_bci = s.cur_bci() + sw.default_offset();
           dest = make_block_at(dest_bci);
           if (s.next_bci() < limit_bci) {
             dest = make_block_at(s.next_bci());
--- a/src/share/vm/ci/ciMethodHandle.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/ci/ciMethodHandle.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,11 +38,12 @@
 // Return an adapter for this MethodHandle.
 ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
   VM_ENTRY_MARK;
-
   Handle h(get_oop());
   methodHandle callee(_callee->get_methodOop());
-  MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD);
-  methodHandle m = mhc.compile(CHECK_NULL);
+  // We catch all exceptions that could happen in the method handle
+  // compiler here and stop the VM if one occurs.
+  MethodHandleCompiler mhc(h, callee, is_invokedynamic, CATCH);
+  methodHandle m = mhc.compile(CATCH);
   return CURRENT_ENV->get_object(m())->as_method();
 }
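
The switch from CHECK_NULL to CATCH above changes the failure policy: a
CHECK-style macro returns to the caller when an exception is pending, while
CATCH treats a pending exception as a fatal error, which is what the new
comment announces. A sketch of the two policies with the macros modeled as
explicit tests of a flag (a stand-in for the thread's pending-exception slot):

#include <cstdio>
#include <cstdlib>

static bool pending_exception = false;  // stand-in for the thread's slot

void compile() {                        // may "throw" by setting the flag
  pending_exception = true;
}

// CHECK_NULL policy, roughly: bail out and let the caller handle it.
int* adapter_with_check() {
  static int result = 42;
  compile();
  if (pending_exception) return NULL;
  return &result;
}

// CATCH policy, roughly: an exception here is unexpected; stop the VM.
int* adapter_with_catch() {
  static int result = 42;
  compile();
  if (pending_exception) {
    fprintf(stderr, "fatal: unexpected pending exception\n");
    exit(1);
  }
  return &result;
}

int main() {
  if (adapter_with_check() == NULL) printf("propagated to caller\n");
  // adapter_with_catch() would terminate the process instead.
  return 0;
}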
 
--- a/src/share/vm/ci/ciStreams.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/ci/ciStreams.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,8 +78,8 @@
     else { assert(!is_wide(), "must not be a wide instruction"); }
   }
 
-  Bytecode* bytecode() const { return Bytecode_at(_bc_start); }
-  Bytecode* next_bytecode() const { return Bytecode_at(_pc); }
+  Bytecode bytecode() const { return Bytecode(this, _bc_start); }
+  Bytecode next_bytecode() const { return Bytecode(this, _pc); }
 
 public:
   // End-Of-Bytecodes
@@ -151,11 +151,11 @@
   bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
 
   int get_index_u1() const {
-    return bytecode()->get_index_u1(cur_bc_raw());
+    return bytecode().get_index_u1(cur_bc_raw());
   }
 
   int get_index_u1_cpcache() const {
-    return bytecode()->get_index_u1_cpcache(cur_bc_raw());
+    return bytecode().get_index_u1_cpcache(cur_bc_raw());
   }
 
   // Get a byte index following this bytecode.
@@ -169,29 +169,29 @@
 
   // Get 2-byte index (byte swapping depending on which bytecode)
   int get_index_u2(bool is_wide = false) const {
-    return bytecode()->get_index_u2(cur_bc_raw(), is_wide);
+    return bytecode().get_index_u2(cur_bc_raw(), is_wide);
   }
 
   // Get 2-byte index in native byte order.  (Rewriter::rewrite makes these.)
   int get_index_u2_cpcache() const {
-    return bytecode()->get_index_u2_cpcache(cur_bc_raw());
+    return bytecode().get_index_u2_cpcache(cur_bc_raw());
   }
 
   // Get 4-byte index, for invokedynamic.
   int get_index_u4() const {
-    return bytecode()->get_index_u4(cur_bc_raw());
+    return bytecode().get_index_u4(cur_bc_raw());
   }
 
   bool has_index_u4() const {
-    return bytecode()->has_index_u4(cur_bc_raw());
+    return bytecode().has_index_u4(cur_bc_raw());
   }
 
   // Get dimensions byte (multinewarray)
   int get_dimensions() const { return *(unsigned char*)(_pc-1); }
 
   // Sign-extended index byte/short, no widening
-  int get_constant_u1()                     const { return bytecode()->get_constant_u1(instruction_size()-1, cur_bc_raw()); }
-  int get_constant_u2(bool is_wide = false) const { return bytecode()->get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
+  int get_constant_u1()                     const { return bytecode().get_constant_u1(instruction_size()-1, cur_bc_raw()); }
+  int get_constant_u2(bool is_wide = false) const { return bytecode().get_constant_u2(instruction_size()-2, cur_bc_raw(), is_wide); }
 
   // Get a byte signed constant for "iinc".  Invalid for other bytecodes.
   // If prefixed with a wide bytecode, get a wide constant
@@ -199,18 +199,18 @@
 
   // 2-byte branch offset from current pc
   int get_dest() const {
-    return cur_bci() + bytecode()->get_offset_s2(cur_bc_raw());
+    return cur_bci() + bytecode().get_offset_s2(cur_bc_raw());
   }
 
   // 2-byte branch offset from next pc
   int next_get_dest() const {
     assert(_pc < _end, "");
-    return next_bci() + next_bytecode()->get_offset_s2(Bytecodes::_ifeq);
+    return next_bci() + next_bytecode().get_offset_s2(Bytecodes::_ifeq);
   }
 
   // 4-byte branch offset from current pc
   int get_far_dest() const {
-    return cur_bci() + bytecode()->get_offset_s4(cur_bc_raw());
+    return cur_bci() + bytecode().get_offset_s4(cur_bc_raw());
   }
 
   // For a lookup or switch table, return target destination
@@ -407,4 +407,11 @@
   }
 };
 
+
+
+// Implementation for declarations in bytecode.hpp
+Bytecode::Bytecode(const ciBytecodeStream* stream, address bcp)
+  : _bcp(bcp != NULL ? bcp : stream->cur_bcp()),
+    _code(Bytecodes::code_at(NULL, addr_at(0))) {}
+Bytecode_lookupswitch::Bytecode_lookupswitch(const ciBytecodeStream* stream): Bytecode(stream) { verify(); }
+Bytecode_tableswitch::Bytecode_tableswitch(const ciBytecodeStream* stream): Bytecode(stream) { verify(); }
+
 #endif // SHARE_VM_CI_CISTREAMS_HPP
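
The constructors appended above are declared in bytecode.hpp but defined here
because their bodies need the complete ciBytecodeStream type, which
bytecode.hpp cannot include. A minimal single-file sketch of that layering
(simplified names; the two "files" are marked by comments):

#include <cassert>

// ---- bytecode.hpp (sketch): the stream type is only forward-declared ----
class Stream;

class Bytecode {
  const unsigned char* _bcp;
 public:
  Bytecode(const Stream* stream);          // declared, not yet definable
  const unsigned char* bcp() const { return _bcp; }
};

// ---- ciStreams.hpp (sketch): Stream is complete, so define it now ----
class Stream {
  const unsigned char* _pos;
 public:
  explicit Stream(const unsigned char* pos) : _pos(pos) {}
  const unsigned char* cur_bcp() const { return _pos; }
};

inline Bytecode::Bytecode(const Stream* stream) : _bcp(stream->cur_bcp()) {}

int main() {
  unsigned char code[1] = { 0xb1 };        // return
  Stream s(code);
  Bytecode bc(&s);
  assert(bc.bcp() == code);
  return 0;
}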
--- a/src/share/vm/ci/ciTypeFlow.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/ci/ciTypeFlow.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1698,18 +1698,17 @@
         break;
 
       case Bytecodes::_tableswitch:  {
-        Bytecode_tableswitch *tableswitch =
-          Bytecode_tableswitch_at(str->cur_bcp());
+        Bytecode_tableswitch tableswitch(str);
 
-        int len = tableswitch->length();
+        int len = tableswitch.length();
         _successors =
           new (arena) GrowableArray<Block*>(arena, len+1, 0, NULL);
-        int bci = current_bci + tableswitch->default_offset();
+        int bci = current_bci + tableswitch.default_offset();
         Block* block = analyzer->block_at(bci, jsrs);
         assert(_successors->length() == SWITCH_DEFAULT, "");
         _successors->append(block);
         while (--len >= 0) {
-          int bci = current_bci + tableswitch->dest_offset_at(len);
+          int bci = current_bci + tableswitch.dest_offset_at(len);
           block = analyzer->block_at(bci, jsrs);
           assert(_successors->length() >= SWITCH_CASES, "");
           _successors->append_if_missing(block);
@@ -1718,19 +1717,18 @@
       }
 
       case Bytecodes::_lookupswitch: {
-        Bytecode_lookupswitch *lookupswitch =
-          Bytecode_lookupswitch_at(str->cur_bcp());
+        Bytecode_lookupswitch lookupswitch(str);
 
-        int npairs = lookupswitch->number_of_pairs();
+        int npairs = lookupswitch.number_of_pairs();
         _successors =
           new (arena) GrowableArray<Block*>(arena, npairs+1, 0, NULL);
-        int bci = current_bci + lookupswitch->default_offset();
+        int bci = current_bci + lookupswitch.default_offset();
         Block* block = analyzer->block_at(bci, jsrs);
         assert(_successors->length() == SWITCH_DEFAULT, "");
         _successors->append(block);
         while(--npairs >= 0) {
-          LookupswitchPair *pair = lookupswitch->pair_at(npairs);
-          int bci = current_bci + pair->offset();
+          LookupswitchPair pair = lookupswitch.pair_at(npairs);
+          int bci = current_bci + pair.offset();
           Block* block = analyzer->block_at(bci, jsrs);
           assert(_successors->length() >= SWITCH_CASES, "");
           _successors->append_if_missing(block);
--- a/src/share/vm/classfile/classLoader.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/classfile/classLoader.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1382,3 +1382,61 @@
 }
 
 #endif //PRODUCT
+
+// Please keep the following two functions at the end of this file. If they were placed
+// at the top or in the middle of the file, they could get inlined by an aggressive
+// compiler (the exact mechanism is not understood); see bug 6966589.
+void PerfClassTraceTime::initialize() {
+  if (!UsePerfData) return;
+
+  if (_eventp != NULL) {
+    // increment the event counter
+    _eventp->inc();
+  }
+
+  // stop the current active thread-local timer to measure inclusive time
+  _prev_active_event = -1;
+  for (int i = 0; i < EVENT_TYPE_COUNT; i++) {
+    if (_timers[i].is_active()) {
+      assert(_prev_active_event == -1, "should have only one active timer");
+      _prev_active_event = i;
+      _timers[i].stop();
+    }
+  }
+
+  if (_recursion_counters == NULL || (_recursion_counters[_event_type])++ == 0) {
+    // start the inclusive timer if not recursively called
+    _t.start();
+  }
+
+  // start thread-local timer of the given event type
+  if (!_timers[_event_type].is_active()) {
+    _timers[_event_type].start();
+  }
+}
+
+PerfClassTraceTime::~PerfClassTraceTime() {
+  if (!UsePerfData) return;
+
+  // stop the thread-local timer as the event completes
+  // and resume the thread-local timer of the event next on the stack
+  _timers[_event_type].stop();
+  jlong selftime = _timers[_event_type].ticks();
+
+  if (_prev_active_event >= 0) {
+    _timers[_prev_active_event].start();
+  }
+
+  if (_recursion_counters != NULL && --(_recursion_counters[_event_type]) > 0) return;
+
+  // increment the counters only on the leaf call
+  _t.stop();
+  _timep->inc(_t.ticks());
+  if (_selftimep != NULL) {
+    _selftimep->inc(selftime);
+  }
+  // add all class loading related event selftime to the accumulated time counter
+  ClassLoader::perf_accumulated_time()->inc(selftime);
+
+  // reset the timer
+  _timers[_event_type].reset();
+}
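
The two functions above implement the inclusive/self-time bookkeeping described
in classLoader.hpp: on entry, whichever event-type timer is currently active is
paused (so its self-time excludes the nested event) and remembered in
_prev_active_event; on exit, the enclosing event's timer is resumed. A
single-threaded toy of that timer-stack discipline, with booleans standing in
for the real elapsedTimers:

#include <cassert>

enum { CLASS_LOAD, CLASS_LINK, EVENT_TYPE_COUNT };

struct Timers {
  bool active[EVENT_TYPE_COUNT];
};

// On event entry: pause the one running event-type timer, start our own,
// and return the paused event so the caller can resume it on exit.
int enter(Timers& t, int event_type) {
  int prev = -1;
  for (int i = 0; i < EVENT_TYPE_COUNT; i++) {
    if (t.active[i]) { assert(prev == -1); prev = i; t.active[i] = false; }
  }
  t.active[event_type] = true;
  return prev;
}

void leave(Timers& t, int event_type, int prev) {
  t.active[event_type] = false;
  if (prev >= 0) t.active[prev] = true;  // resume the enclosing event
}

int main() {
  Timers t = {};
  int p0 = enter(t, CLASS_LOAD);   // outer event starts
  int p1 = enter(t, CLASS_LINK);   // nested event pauses CLASS_LOAD
  assert(!t.active[CLASS_LOAD] && t.active[CLASS_LINK]);
  leave(t, CLASS_LINK, p1);        // CLASS_LOAD resumes
  assert(t.active[CLASS_LOAD]);
  leave(t, CLASS_LOAD, p0);
  return 0;
}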
--- a/src/share/vm/classfile/classLoader.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/classfile/classLoader.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -356,111 +356,57 @@
 // (i.e. only one event type) are active at a time even if multiple PerfClassTraceTime
 // instances have been created as multiple events are happening.
 class PerfClassTraceTime {
-  public:
-    enum {
-       CLASS_LOAD   = 0,
-       PARSE_CLASS  = 1,
-       CLASS_LINK   = 2,
-       CLASS_VERIFY = 3,
-       CLASS_CLINIT = 4,
-       DEFINE_CLASS = 5,
-       EVENT_TYPE_COUNT = 6
-    };
-  protected:
-    // _t tracks time from initialization to destruction of this timer instance
-    // including time for all other event types, and recursive calls of this type.
-    // When a timer is called recursively, the elapsedTimer _t would not be used.
-    elapsedTimer     _t;
-    PerfLongCounter* _timep;
-    PerfLongCounter* _selftimep;
-    PerfLongCounter* _eventp;
-    // pointer to thread-local recursion counter and timer array
-    // The thread_local timers track cumulative time for specific event types
-    // exclusive of time for other event types, but including recursive calls
-    // of the same type.
-    int*             _recursion_counters;
-    elapsedTimer*    _timers;
-    int              _event_type;
-    int              _prev_active_event;
-
-  public:
-
-    inline PerfClassTraceTime(PerfLongCounter* timep,     /* counter incremented with inclusive time */
-                              PerfLongCounter* selftimep, /* counter incremented with exclusive time */
-                              PerfLongCounter* eventp,    /* event counter */
-                              int* recursion_counters,    /* thread-local recursion counter array */
-                              elapsedTimer* timers,       /* thread-local timer array */
-                              int type                    /* event type */ ) :
-        _timep(timep), _selftimep(selftimep), _eventp(eventp), _recursion_counters(recursion_counters), _timers(timers), _event_type(type) {
-      initialize();
-    }
-
-    inline PerfClassTraceTime(PerfLongCounter* timep,     /* counter incremented with inclusive time */
-                              elapsedTimer* timers,       /* thread-local timer array */
-                              int type                    /* event type */ ) :
-        _timep(timep), _selftimep(NULL), _eventp(NULL), _recursion_counters(NULL), _timers(timers), _event_type(type) {
-      initialize();
-    }
-
-    void initialize() {
-      if (!UsePerfData) return;
+ public:
+  enum {
+    CLASS_LOAD   = 0,
+    PARSE_CLASS  = 1,
+    CLASS_LINK   = 2,
+    CLASS_VERIFY = 3,
+    CLASS_CLINIT = 4,
+    DEFINE_CLASS = 5,
+    EVENT_TYPE_COUNT = 6
+  };
+ protected:
+  // _t tracks time from initialization to destruction of this timer instance
+  // including time for all other event types, and recursive calls of this type.
+  // When a timer is called recursively, the elapsedTimer _t would not be used.
+  elapsedTimer     _t;
+  PerfLongCounter* _timep;
+  PerfLongCounter* _selftimep;
+  PerfLongCounter* _eventp;
+  // pointer to thread-local recursion counter and timer array
+  // The thread-local timers track cumulative time for specific event types
+  // exclusive of time for other event types, but including recursive calls
+  // of the same type.
+  int*             _recursion_counters;
+  elapsedTimer*    _timers;
+  int              _event_type;
+  int              _prev_active_event;
 
-      if (_eventp != NULL) {
-        // increment the event counter
-        _eventp->inc();
-      }
+ public:
 
-      // stop the current active thread-local timer to measure inclusive time
-      _prev_active_event = -1;
-      for (int i=0; i < EVENT_TYPE_COUNT; i++) {
-         if (_timers[i].is_active()) {
-           assert(_prev_active_event == -1, "should have only one active timer");
-           _prev_active_event = i;
-           _timers[i].stop();
-         }
-      }
-
-      if (_recursion_counters == NULL || (_recursion_counters[_event_type])++ == 0) {
-        // start the inclusive timer if not recursively called
-        _t.start();
-      }
-
-      // start thread-local timer of the given event type
-      if (!_timers[_event_type].is_active()) {
-        _timers[_event_type].start();
-      }
-    }
+  inline PerfClassTraceTime(PerfLongCounter* timep,     /* counter incremented with inclusive time */
+                            PerfLongCounter* selftimep, /* counter incremented with exclusive time */
+                            PerfLongCounter* eventp,    /* event counter */
+                            int* recursion_counters,    /* thread-local recursion counter array */
+                            elapsedTimer* timers,       /* thread-local timer array */
+                            int type                    /* event type */ ) :
+      _timep(timep), _selftimep(selftimep), _eventp(eventp), _recursion_counters(recursion_counters), _timers(timers), _event_type(type) {
+    initialize();
+  }
 
-    inline void suspend() { _t.stop(); _timers[_event_type].stop(); }
-    inline void resume()  { _t.start(); _timers[_event_type].start(); }
-
-    ~PerfClassTraceTime() {
-      if (!UsePerfData) return;
-
-      // stop the thread-local timer as the event completes
-      // and resume the thread-local timer of the event next on the stack
-      _timers[_event_type].stop();
-      jlong selftime = _timers[_event_type].ticks();
-
-      if (_prev_active_event >= 0) {
-        _timers[_prev_active_event].start();
-      }
+  inline PerfClassTraceTime(PerfLongCounter* timep,     /* counter incremented with inclusive time */
+                            elapsedTimer* timers,       /* thread-local timer array */
+                            int type                    /* event type */ ) :
+      _timep(timep), _selftimep(NULL), _eventp(NULL), _recursion_counters(NULL), _timers(timers), _event_type(type) {
+    initialize();
+  }
 
-      if (_recursion_counters != NULL && --(_recursion_counters[_event_type]) > 0) return;
+  inline void suspend() { _t.stop(); _timers[_event_type].stop(); }
+  inline void resume()  { _t.start(); _timers[_event_type].start(); }
 
-      // increment the counters only on the leaf call
-      _t.stop();
-      _timep->inc(_t.ticks());
-      if (_selftimep != NULL) {
-        _selftimep->inc(selftime);
-      }
-      // add all class loading related event selftime to the accumulated time counter
-      ClassLoader::perf_accumulated_time()->inc(selftime);
-
-      // reset the timer
-      _timers[_event_type].reset();
-    }
+  ~PerfClassTraceTime();
+  void initialize();
 };
 
-
 #endif // SHARE_VM_CLASSFILE_CLASSLOADER_HPP
--- a/src/share/vm/classfile/systemDictionary.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/classfile/systemDictionary.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,6 +172,8 @@
                                                                               \
   template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
                                                                               \
+  template(sun_misc_PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt)       \
+                                                                              \
   /* Preload boxing klasses */                                                \
   template(Boolean_klass,                java_lang_Boolean,              Pre) \
   template(Character_klass,              java_lang_Character,            Pre) \
--- a/src/share/vm/classfile/vmSymbols.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/classfile/vmSymbols.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,6 +111,7 @@
   template(sun_jkernel_DownloadManager,               "sun/jkernel/DownloadManager")              \
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
   template(setBootClassLoaderHook_name,               "setBootClassLoaderHook")                   \
+  template(sun_misc_PostVMInitHook,                   "sun/misc/PostVMInitHook")                  \
                                                                                                   \
   /* class file format tags */                                                                    \
   template(tag_source_file,                           "SourceFile")                               \
--- a/src/share/vm/code/nmethod.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/code/nmethod.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -619,8 +619,8 @@
   OopMapSet* oop_maps )
   : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
              nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
-  _compiled_synchronized_native_basic_lock_owner_sp_offset(basic_lock_owner_sp_offset),
-  _compiled_synchronized_native_basic_lock_sp_offset(basic_lock_sp_offset)
+  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
+  _native_basic_lock_sp_offset(basic_lock_sp_offset)
 {
   {
     debug_only(No_Safepoint_Verifier nsv;)
@@ -696,8 +696,8 @@
   int frame_size)
   : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
              nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),
-  _compiled_synchronized_native_basic_lock_owner_sp_offset(in_ByteSize(-1)),
-  _compiled_synchronized_native_basic_lock_sp_offset(in_ByteSize(-1))
+  _native_receiver_sp_offset(in_ByteSize(-1)),
+  _native_basic_lock_sp_offset(in_ByteSize(-1))
 {
   {
     debug_only(No_Safepoint_Verifier nsv;)
@@ -790,8 +790,8 @@
   )
   : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
              nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
-  _compiled_synchronized_native_basic_lock_owner_sp_offset(in_ByteSize(-1)),
-  _compiled_synchronized_native_basic_lock_sp_offset(in_ByteSize(-1))
+  _native_receiver_sp_offset(in_ByteSize(-1)),
+  _native_basic_lock_sp_offset(in_ByteSize(-1))
 {
   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
   {
@@ -1863,9 +1863,9 @@
 #ifndef SHARK
   if (!method()->is_native()) {
     SimpleScopeDesc ssd(this, fr.pc());
-    Bytecode_invoke* call = Bytecode_invoke_at(ssd.method(), ssd.bci());
-    bool has_receiver = call->has_receiver();
-    symbolOop signature = call->signature();
+    Bytecode_invoke call(ssd.method(), ssd.bci());
+    bool has_receiver = call.has_receiver();
+    symbolOop signature = call.signature();
     fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
   }
 #endif // !SHARK
@@ -2698,8 +2698,7 @@
       } else if (sd->method()->is_native()) {
         st->print("method is native");
       } else {
-        address bcp  = sd->method()->bcp_from(sd->bci());
-        Bytecodes::Code bc = Bytecodes::java_code_at(bcp);
+        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
         st->print(";*%s", Bytecodes::name(bc));
         switch (bc) {
         case Bytecodes::_invokevirtual:
@@ -2707,10 +2706,10 @@
         case Bytecodes::_invokestatic:
         case Bytecodes::_invokeinterface:
           {
-            Bytecode_invoke* invoke = Bytecode_invoke_at(sd->method(), sd->bci());
+            Bytecode_invoke invoke(sd->method(), sd->bci());
             st->print(" ");
-            if (invoke->name() != NULL)
-              invoke->name()->print_symbol_on(st);
+            if (invoke.name() != NULL)
+              invoke.name()->print_symbol_on(st);
             else
               st->print("<UNKNOWN>");
             break;
@@ -2720,10 +2719,10 @@
         case Bytecodes::_getstatic:
         case Bytecodes::_putstatic:
           {
-            Bytecode_field* field = Bytecode_field_at(sd->method(), sd->bci());
+            Bytecode_field field(sd->method(), sd->bci());
             st->print(" ");
-            if (field->name() != NULL)
-              field->name()->print_symbol_on(st);
+            if (field.name() != NULL)
+              field.name()->print_symbol_on(st);
             else
               st->print("<UNKNOWN>");
           }
--- a/src/share/vm/code/nmethod.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/code/nmethod.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -210,7 +210,7 @@
   ExceptionCache *_exception_cache;
   PcDescCache     _pc_desc_cache;
 
-  // These are only used for compiled synchronized native methods to
+  // These are used for compiled synchronized native methods to
   // locate the owner and stack slot for the BasicLock so that we can
   // properly revoke the bias of the owner if necessary. They are
   // needed because there is no debug information for compiled native
@@ -220,8 +220,10 @@
   // sharing between platforms. Note that currently biased locking
   // will never cause Class instances to be biased but this code
   // handles the static synchronized case as well.
-  ByteSize _compiled_synchronized_native_basic_lock_owner_sp_offset;
-  ByteSize _compiled_synchronized_native_basic_lock_sp_offset;
+  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
+  // for non-static native wrapper frames.
+  ByteSize _native_receiver_sp_offset;
+  ByteSize _native_basic_lock_sp_offset;
 
   friend class nmethodLocker;
 
@@ -676,11 +678,11 @@
   bool is_patchable_at(address instr_address);
 
   // UseBiasedLocking support
-  ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
-    return _compiled_synchronized_native_basic_lock_owner_sp_offset;
+  ByteSize native_receiver_sp_offset() {
+    return _native_receiver_sp_offset;
   }
-  ByteSize compiled_synchronized_native_basic_lock_sp_offset() {
-    return _compiled_synchronized_native_basic_lock_sp_offset;
+  ByteSize native_basic_lock_sp_offset() {
+    return _native_basic_lock_sp_offset;
   }
 
   // support for code generation
--- a/src/share/vm/compiler/methodLiveness.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/compiler/methodLiveness.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -286,16 +286,15 @@
         break;
       case Bytecodes::_tableswitch:
         {
-          Bytecode_tableswitch *tableswitch =
-            Bytecode_tableswitch_at(bytes.cur_bcp());
+          Bytecode_tableswitch tableswitch(&bytes);
 
-          int len = tableswitch->length();
+          int len = tableswitch.length();
 
-          dest = _block_map->at(bci + tableswitch->default_offset());
+          dest = _block_map->at(bci + tableswitch.default_offset());
           assert(dest != NULL, "branch desination must start a block.");
           dest->add_normal_predecessor(current_block);
           while (--len >= 0) {
-            dest = _block_map->at(bci + tableswitch->dest_offset_at(len));
+            dest = _block_map->at(bci + tableswitch.dest_offset_at(len));
             assert(dest != NULL, "branch desination must start a block.");
             dest->add_normal_predecessor(current_block);
           }
@@ -304,17 +303,16 @@
 
       case Bytecodes::_lookupswitch:
         {
-          Bytecode_lookupswitch *lookupswitch =
-            Bytecode_lookupswitch_at(bytes.cur_bcp());
+          Bytecode_lookupswitch lookupswitch(&bytes);
 
-          int npairs = lookupswitch->number_of_pairs();
+          int npairs = lookupswitch.number_of_pairs();
 
-          dest = _block_map->at(bci + lookupswitch->default_offset());
+          dest = _block_map->at(bci + lookupswitch.default_offset());
           assert(dest != NULL, "branch desination must start a block.");
           dest->add_normal_predecessor(current_block);
           while(--npairs >= 0) {
-            LookupswitchPair *pair = lookupswitch->pair_at(npairs);
-            dest = _block_map->at( bci + pair->offset());
+            LookupswitchPair pair = lookupswitch.pair_at(npairs);
+            dest = _block_map->at( bci + pair.offset());
             assert(dest != NULL, "branch desination must start a block.");
             dest->add_normal_predecessor(current_block);
           }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -3478,6 +3478,7 @@
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState);
+
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
@@ -4978,6 +4979,7 @@
   if (should_unload_classes()) {
     CodeCache::gc_epilogue();
   }
+  JvmtiExport::gc_epilogue();
 
   // If we encountered any (marking stack / work queue) overflow
   // events during the current CMS cycle, take appropriate
@@ -5940,11 +5942,6 @@
   }
   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
-
-  // JVMTI object tagging is based on JNI weak refs. If any of these
-  // refs were cleared then JVMTI needs to update its maps and
-  // maybe post ObjectFrees to agents.
-  JvmtiExport::cms_ref_processing_epilogue();
 }
 
 #ifndef PRODUCT
@@ -6305,6 +6302,7 @@
 
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsInitial(true);       // asynch
       if (PrintGC) {
         _cmsGen->printOccupancy("initial-mark");
@@ -6312,6 +6310,7 @@
       break;
     }
     case CMS_op_checkpointRootsFinal: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsFinal(true,    // asynch
                            false,   // !clear_all_soft_refs
                            false);  // !init_mark_was_synchronous
@@ -7881,25 +7880,23 @@
 }
 
 // We need this destructor to reclaim any space at the end
-// of the space, which do_blk below may not have added back to
-// the free lists. [basically dealing with the "fringe effect"]
+// of the space, which do_blk below may not yet have added back to
+// the free lists.
 SweepClosure::~SweepClosure() {
   assert_lock_strong(_freelistLock);
-  // this should be treated as the end of a free run if any
-  // The current free range should be returned to the free lists
-  // as one coalesced chunk.
+  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
+         "sweep _limit out of bounds");
+  // Flush any remaining coterminal free run as a single
+  // coalesced chunk to the appropriate free list.
   if (inFreeRange()) {
-    flushCurFreeChunk(freeFinger(),
-      pointer_delta(_limit, freeFinger()));
-    assert(freeFinger() < _limit, "the finger pointeth off base");
+    assert(freeFinger() < _limit, "freeFinger points too high");
+    flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
     if (CMSTraceSweeper) {
-      gclog_or_tty->print("destructor:");
-      gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
-                 "[coalesced:"SIZE_FORMAT"]\n",
-                 freeFinger(), pointer_delta(_limit, freeFinger()),
-                 lastFreeRangeCoalesced());
-    }
-  }
+      gclog_or_tty->print("Sweep: last chunk: ");
+      gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
+                          freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
+    }
+  } // else nothing to flush
   NOT_PRODUCT(
     if (Verbose && PrintGC) {
       gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
@@ -7936,9 +7933,8 @@
 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
     bool freeRangeInFreeLists) {
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
-               freeFinger, _sp->block_size(freeFinger),
-               freeRangeInFreeLists);
+    gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
+               freeFinger, freeRangeInFreeLists);
   }
   assert(!inFreeRange(), "Trampling existing free range");
   set_inFreeRange(true);
@@ -7993,21 +7989,36 @@
   // may have caused us to coalesce the block ending at the address _limit
   // with a newly expanded chunk (this happens when _limit was set to the
   // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
-  if (addr >= _limit) { // we have swept up to or past the limit, do nothing more
+  if (addr >= _limit) { // we have swept up to or past the limit: finish up
     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
            "sweep _limit out of bounds");
     assert(addr < _sp->end(), "addr out of bounds");
-    // help the closure application finish
+    // Flush any remaining coterminal free run as a single
+    // coalesced chunk to the appropriate free list.
+    if (inFreeRange()) {
+      assert(freeFinger() < _limit, "finger points too high");
+      flush_cur_free_chunk(freeFinger(),
+                           pointer_delta(addr, freeFinger()));
+      if (CMSTraceSweeper) {
+        gclog_or_tty->print("Sweep: last chunk: ");
+        gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
+                   "[coalesced:"SIZE_FORMAT"]\n",
+                   freeFinger(), pointer_delta(addr, freeFinger()),
+                   lastFreeRangeCoalesced());
+      }
+    }
+
+    // help the iterator loop finish
     return pointer_delta(_sp->end(), addr);
   }
+
   assert(addr < _limit, "sweep invariant");
-
   // check if we should yield
   do_yield_check(addr);
   if (fc->isFree()) {
     // Chunk that is already free
     res = fc->size();
-    doAlreadyFreeChunk(fc);
+    do_already_free_chunk(fc);
     debug_only(_sp->verifyFreeLists());
     assert(res == fc->size(), "Don't expect the size to change");
     NOT_PRODUCT(
@@ -8017,7 +8028,7 @@
     NOT_PRODUCT(_last_fc = fc;)
   } else if (!_bitMap->isMarked(addr)) {
     // Chunk is fresh garbage
-    res = doGarbageChunk(fc);
+    res = do_garbage_chunk(fc);
     debug_only(_sp->verifyFreeLists());
     NOT_PRODUCT(
       _numObjectsFreed++;
@@ -8025,7 +8036,7 @@
     )
   } else {
     // Chunk that is alive.
-    res = doLiveChunk(fc);
+    res = do_live_chunk(fc);
     debug_only(_sp->verifyFreeLists());
     NOT_PRODUCT(
         _numObjectsLive++;
@@ -8078,7 +8089,7 @@
 // to a free list which may be overpopulated.
 //
 
-void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
+void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
   size_t size = fc->size();
   // Chunks that cannot be coalesced are not in the
   // free lists.
@@ -8094,23 +8105,23 @@
   // addr and purported end of this block.
   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
 
-  // Some chunks cannot be coalesced in under any circumstances.
+  // Some chunks cannot be coalesced under any circumstances.
   // See the definition of cantCoalesce().
   if (!fc->cantCoalesce()) {
     // This chunk can potentially be coalesced.
     if (_sp->adaptive_freelists()) {
       // All the work is done in
-      doPostIsFreeOrGarbageChunk(fc, size);
+      do_post_free_or_garbage_chunk(fc, size);
     } else {  // Not adaptive free lists
       // this is a free chunk that can potentially be coalesced by the sweeper;
       if (!inFreeRange()) {
         // if the next chunk is a free block that can't be coalesced
         // it doesn't make sense to remove this chunk from the free lists
         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
-        assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
-        if ((HeapWord*)nextChunk < _limit  &&    // there's a next chunk...
-            nextChunk->isFree()    &&            // which is free...
-            nextChunk->cantCoalesce()) {         // ... but cant be coalesced
+        assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
+        if ((HeapWord*)nextChunk < _sp->end() &&     // There is another chunk to the right ...
+            nextChunk->isFree()               &&     // ... which is free...
+            nextChunk->cantCoalesce()) {             // ... but can't be coalesced
           // nothing to do
         } else {
           // Potentially the start of a new free range:
@@ -8156,14 +8167,14 @@
     // as the end of a free run if any
     if (inFreeRange()) {
       // we kicked some butt; time to pick up the garbage
-      assert(freeFinger() < addr, "the finger pointeth off base");
-      flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
+      assert(freeFinger() < addr, "freeFinger points too high");
+      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
     }
     // else, nothing to do, just continue
   }
 }
 
-size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
+size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
   // This is a chunk of garbage.  It is not in any free list.
   // Add it to a free list or let it possibly be coalesced into
   // a larger chunk.
@@ -8175,7 +8186,7 @@
     // addr and purported end of just dead object.
     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
 
-    doPostIsFreeOrGarbageChunk(fc, size);
+    do_post_free_or_garbage_chunk(fc, size);
   } else {
     if (!inFreeRange()) {
       // start of a new free range
@@ -8214,35 +8225,16 @@
   return size;
 }
 
-size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
+size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
   HeapWord* addr = (HeapWord*) fc;
   // The sweeper has just found a live object. Return any accumulated
   // left hand chunk to the free lists.
   if (inFreeRange()) {
-    if (_sp->adaptive_freelists()) {
-      flushCurFreeChunk(freeFinger(),
-                        pointer_delta(addr, freeFinger()));
-    } else { // not adaptive freelists
-      set_inFreeRange(false);
-      // Add the free range back to the free list if it is not already
-      // there.
-      if (!freeRangeInFreeLists()) {
-        assert(freeFinger() < addr, "the finger pointeth off base");
-        if (CMSTraceSweeper) {
-          gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
-            "[coalesced:%d]\n",
-            freeFinger(), pointer_delta(addr, freeFinger()),
-            lastFreeRangeCoalesced());
-        }
-        _sp->addChunkAndRepairOffsetTable(freeFinger(),
-          pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
-      }
-    }
-  }
-
-  // Common code path for original and adaptive free lists.
-
-  // this object is live: we'd normally expect this to be
+    assert(freeFinger() < addr, "freeFinger points too high");
+    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
+  }
+
+  // This object is live: we'd normally expect this to be
   // an oop, and like to assert the following:
   // assert(oop(addr)->is_oop(), "live block should be an oop");
   // However, as we commented above, this may be an object whose
@@ -8257,7 +8249,7 @@
     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
            "alignment problem");
 
-    #ifdef DEBUG
+#ifdef DEBUG
       if (oop(addr)->klass_or_null() != NULL &&
           (   !_collector->should_unload_classes()
            || (oop(addr)->is_parsable()) &&
@@ -8271,7 +8263,7 @@
                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
                "P-mark and computed size do not agree");
       }
-    #endif
+#endif
 
   } else {
     // This should be an initialized object that's alive.
@@ -8298,18 +8290,16 @@
   return size;
 }
 
-void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
-                                            size_t chunkSize) {
-  // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
-  // scheme.
+void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
+                                                 size_t chunkSize) {
+  // do_post_free_or_garbage_chunk() should only be called in the case
+  // of the adaptive free list allocator.
   bool fcInFreeLists = fc->isFree();
   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
   if (CMSTestInFreeList && fcInFreeLists) {
-    assert(_sp->verifyChunkInFreeLists(fc),
-      "free chunk is not in free lists");
-  }
-
+    assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
+  }
 
   if (CMSTraceSweeper) {
     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
@@ -8382,20 +8372,21 @@
     if (inFreeRange()) {
       // In a free range but cannot coalesce with the right hand chunk.
       // Put the current free range into the free lists.
-      flushCurFreeChunk(freeFinger(),
-        pointer_delta(addr, freeFinger()));
+      flush_cur_free_chunk(freeFinger(),
+                           pointer_delta(addr, freeFinger()));
     }
     // Set up for new free range.  Pass along whether the right hand
     // chunk is in the free lists.
     initialize_free_range((HeapWord*)fc, fcInFreeLists);
   }
 }
-void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
+
+void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   assert(size > 0,
     "A zero sized chunk cannot be added to the free lists.");
   if (!freeRangeInFreeLists()) {
-    if(CMSTestInFreeList) {
+    if (CMSTestInFreeList) {
       FreeChunk* fc = (FreeChunk*) chunk;
       fc->setSize(size);
       assert(!_sp->verifyChunkInFreeLists(fc),
@@ -8430,7 +8421,7 @@
   // chunk just flushed, they will need to wait for the next
   // sweep to be coalesced.
   if (inFreeRange()) {
-    flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
+    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
   }
 
   // First give up the locks, then yield, then re-lock.
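
Beyond the renames, the recurring "flush any remaining coterminal free run as a
single coalesced chunk" logic above is the core of the sweep: runs of adjacent
free and garbage blocks are accumulated from freeFinger() forward and returned
to the free lists as one chunk when a live object (or the limit) ends the run.
A toy sweep over equal-sized blocks, with a plain vector standing in for the
free lists:

#include <cassert>
#include <vector>

struct Run { int start; int size; };

struct Sweep {
  bool in_free_range;
  int  free_finger;                  // start of the current free run
  std::vector<Run> free_list;
  Sweep() : in_free_range(false), free_finger(0) {}

  void note_block(int addr, bool is_free) {
    if (is_free) {
      if (!in_free_range) { in_free_range = true; free_finger = addr; }
      // else: the run grows; nothing is flushed yet
    } else if (in_free_range) {
      flush(addr);                   // a live block ends the run
    }
  }
  void flush(int up_to) {            // one coalesced chunk per free run
    assert(free_finger < up_to);
    free_list.push_back(Run{free_finger, up_to - free_finger});
    in_free_range = false;
  }
  void finish(int limit) {           // ~SweepClosure: flush coterminal run
    if (in_free_range) flush(limit);
  }
};

int main() {
  Sweep s;
  bool free_map[6] = { true, true, false, true, true, true };
  for (int a = 0; a < 6; a++) s.note_block(a, free_map[a]);
  s.finish(6);
  assert(s.free_list.size() == 2);
  assert(s.free_list[0].size == 2 && s.free_list[1].size == 3);
  return 0;
}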
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1701,7 +1701,9 @@
   CMSCollector*                  _collector;  // collector doing the work
   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
   CompactibleFreeListSpace*      _sp;   // Space being swept
-  HeapWord*                      _limit;
+  HeapWord*                      _limit; // the address at which the sweep should stop because
+                                         // we do not expect blocks eligible for sweeping past
+                                         // that address.
   Mutex*                         _freelistLock; // Free list lock (in space)
   CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                 // generation)
@@ -1745,14 +1747,13 @@
  private:
   // Code that is common to a free chunk or garbage when
   // encountered during sweeping.
-  void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
-                                  size_t chunkSize);
+  void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
   // Process a free chunk during sweeping.
-  void doAlreadyFreeChunk(FreeChunk *fc);
+  void do_already_free_chunk(FreeChunk *fc);
   // Process a garbage chunk during sweeping.
-  size_t doGarbageChunk(FreeChunk *fc);
+  size_t do_garbage_chunk(FreeChunk *fc);
   // Process a live chunk during sweeping.
-  size_t doLiveChunk(FreeChunk* fc);
+  size_t do_live_chunk(FreeChunk* fc);
 
   // Accessors.
   HeapWord* freeFinger() const          { return _freeFinger; }
@@ -1769,7 +1770,7 @@
   // Initialize a free range.
   void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
   // Return this chunk to the free lists.
-  void flushCurFreeChunk(HeapWord* chunk, size_t size);
+  void flush_cur_free_chunk(HeapWord* chunk, size_t size);
 
   // Check if we should yield and do so when necessary.
   inline void do_yield_check(HeapWord* addr);
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/vmGCOperations.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -457,6 +458,7 @@
   _marking_task_overhead(1.0),
   _cleanup_sleep_factor(0.0),
   _cleanup_task_overhead(1.0),
+  _cleanup_list("Cleanup List"),
   _region_bm(max_regions, false /* in_resource_area*/),
   _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
@@ -520,12 +522,6 @@
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
 
-  int size = (int) MAX2(ParallelGCThreads, (size_t)1);
-  _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
-  for (int i = 0 ; i < size; i++) {
-    _par_cleanup_thread_state[i] = new ParCleanupThreadState;
-  }
-
   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
 
@@ -710,11 +706,6 @@
 }
 
 ConcurrentMark::~ConcurrentMark() {
-  int size = (int) MAX2(ParallelGCThreads, (size_t)1);
-  for (int i = 0; i < size; i++) delete _par_cleanup_thread_state[i];
-  FREE_C_HEAP_ARRAY(ParCleanupThreadState*,
-                    _par_cleanup_thread_state);
-
   for (int i = 0; i < (int) _max_task_num; ++i) {
     delete _task_queues->queue(i);
     delete _tasks[i];
@@ -1142,6 +1133,8 @@
     return;
   }
 
+  SvcGCMarker sgcm(SvcGCMarker::OTHER);
+
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
@@ -1168,12 +1161,12 @@
     if (G1TraceMarkStackOverflow)
       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
   } else {
+    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
     // We're done with marking.
     // This is the end of the marking cycle; we expect all
     // threads to have SATB queues with active set to true.
-    JavaThread::satb_mark_queue_set().set_active_all_threads(
-                                                  false, /* new active value */
-                                                  true /* expected_active */);
+    satb_mq_set.set_active_all_threads(false, /* new active value */
+                                       true /* expected_active */);
 
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
@@ -1507,21 +1500,20 @@
   size_t _max_live_bytes;
   size_t _regions_claimed;
   size_t _freed_bytes;
-  size_t _cleared_h_regions;
-  size_t _freed_regions;
-  UncleanRegionList* _unclean_region_list;
+  FreeRegionList _local_cleanup_list;
+  HumongousRegionSet _humongous_proxy_set;
   double _claimed_region_time;
   double _max_region_time;
 
 public:
   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                             UncleanRegionList* list,
                              int worker_num);
   size_t freed_bytes() { return _freed_bytes; }
-  size_t cleared_h_regions() { return _cleared_h_regions; }
-  size_t freed_regions() { return  _freed_regions; }
-  UncleanRegionList* unclean_region_list() {
-    return _unclean_region_list;
+  FreeRegionList* local_cleanup_list() {
+    return &_local_cleanup_list;
+  }
+  HumongousRegionSet* humongous_proxy_set() {
+    return &_humongous_proxy_set;
   }
 
   bool doHeapRegion(HeapRegion *r);
@@ -1534,25 +1526,22 @@
 
 class G1ParNoteEndTask: public AbstractGangTask {
   friend class G1NoteEndOfConcMarkClosure;
+
 protected:
   G1CollectedHeap* _g1h;
   size_t _max_live_bytes;
   size_t _freed_bytes;
-  ConcurrentMark::ParCleanupThreadState** _par_cleanup_thread_state;
+  FreeRegionList* _cleanup_list;
+
 public:
   G1ParNoteEndTask(G1CollectedHeap* g1h,
-                   ConcurrentMark::ParCleanupThreadState**
-                   par_cleanup_thread_state) :
+                   FreeRegionList* cleanup_list) :
     AbstractGangTask("G1 note end"), _g1h(g1h),
-    _max_live_bytes(0), _freed_bytes(0),
-    _par_cleanup_thread_state(par_cleanup_thread_state)
-  {}
+    _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
 
   void work(int i) {
     double start = os::elapsedTime();
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h,
-                                           &_par_cleanup_thread_state[i]->list,
-                                           i);
+    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
                                             HeapRegion::NoteEndClaimValue);
@@ -1561,14 +1550,18 @@
     }
     assert(g1_note_end.complete(), "Shouldn't have yielded!");
 
-    // Now finish up freeing the current thread's regions.
-    _g1h->finish_free_region_work(g1_note_end.freed_bytes(),
-                                  g1_note_end.cleared_h_regions(),
-                                  0, NULL);
+    // Now update the lists
+    _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
+                                            NULL /* free_list */,
+                                            g1_note_end.humongous_proxy_set(),
+                                            true /* par */);
     {
       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
       _max_live_bytes += g1_note_end.max_live_bytes();
       _freed_bytes += g1_note_end.freed_bytes();
+
+      _cleanup_list->add_as_tail(g1_note_end.local_cleanup_list());
+      assert(g1_note_end.local_cleanup_list()->is_empty(), "post-condition");
     }
     double end = os::elapsedTime();
     if (G1PrintParCleanupStats) {
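
The shape of the work() method above is worth noting: each worker accumulates its statistics and its freed regions privately, then folds them into the shared state inside a single short critical section guarded by ParGCRareEvent_lock. A minimal standalone sketch of that accumulate-then-merge pattern, with std::mutex and std::deque standing in for MutexLockerEx and FreeRegionList (all names illustrative, not HotSpot APIs):

#include <cstddef>
#include <deque>
#include <mutex>

struct SharedCleanupState {
  std::mutex      lock;                     // cf. ParGCRareEvent_lock
  size_t          max_live_bytes = 0;
  size_t          freed_bytes    = 0;
  std::deque<int> cleanup_list;             // ints stand in for HeapRegion*
};

void merge_worker_results(SharedCleanupState& s, size_t live, size_t freed,
                          std::deque<int>& local_list) {
  std::lock_guard<std::mutex> g(s.lock);    // one short critical section per worker
  s.max_live_bytes += live;
  s.freed_bytes    += freed;
  // Splice the worker's local list onto the shared one and leave it
  // empty, mirroring add_as_tail() and the is_empty() post-condition.
  s.cleanup_list.insert(s.cleanup_list.end(),
                        local_list.begin(), local_list.end());
  local_list.clear();
}
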
@@ -1609,30 +1602,28 @@
 
 G1NoteEndOfConcMarkClosure::
 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                           UncleanRegionList* list,
                            int worker_num)
   : _g1(g1), _worker_num(worker_num),
     _max_live_bytes(0), _regions_claimed(0),
-    _freed_bytes(0), _cleared_h_regions(0), _freed_regions(0),
+    _freed_bytes(0),
     _claimed_region_time(0.0), _max_region_time(0.0),
-    _unclean_region_list(list)
-{}
-
-bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *r) {
+    _local_cleanup_list("Local Cleanup List"),
+    _humongous_proxy_set("Local Cleanup Humongous Proxy Set") { }
+
+bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
   // We use a claim value of zero here because all regions
   // were claimed with value 1 in the FinalCount task.
-  r->reset_gc_time_stamp();
-  if (!r->continuesHumongous()) {
+  hr->reset_gc_time_stamp();
+  if (!hr->continuesHumongous()) {
     double start = os::elapsedTime();
     _regions_claimed++;
-    r->note_end_of_marking();
-    _max_live_bytes += r->max_live_bytes();
-    _g1->free_region_if_totally_empty_work(r,
-                                           _freed_bytes,
-                                           _cleared_h_regions,
-                                           _freed_regions,
-                                           _unclean_region_list,
-                                           true /*par*/);
+    hr->note_end_of_marking();
+    _max_live_bytes += hr->max_live_bytes();
+    _g1->free_region_if_totally_empty(hr,
+                                      &_freed_bytes,
+                                      &_local_cleanup_list,
+                                      &_humongous_proxy_set,
+                                      true /* par */);
     double region_time = (os::elapsedTime() - start);
     _claimed_region_time += region_time;
     if (region_time > _max_region_time) _max_region_time = region_time;
@@ -1652,6 +1643,8 @@
     return;
   }
 
+  g1h->verify_region_sets_optional();
+
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
@@ -1716,7 +1709,7 @@
 
   // Note end of marking in all heap regions.
   double note_end_start = os::elapsedTime();
-  G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state);
+  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     int n_workers = g1h->workers()->total_workers();
     g1h->set_par_threads(n_workers);
@@ -1728,9 +1721,14 @@
   } else {
     g1_par_note_end_task.work(0);
   }
-  g1h->set_unclean_regions_coming(true);
+
+  if (!cleanup_list_is_empty()) {
+    // The cleanup list is not empty, so we'll have to process it
+    // concurrently. Notify anyone else who might want free regions
+    // that there will be more free regions coming soon.
+    g1h->set_free_regions_coming();
+  }
   double note_end_end = os::elapsedTime();
-  // Tell the mutators that there might be unclean regions coming...
   if (G1PrintParCleanupStats) {
     gclog_or_tty->print_cr("  note end of marking: %8.3f ms.",
                            (note_end_end - note_end_start)*1000.0);
@@ -1796,52 +1794,70 @@
                      /* silent       */ false,
                      /* prev marking */ true);
   }
+
+  g1h->verify_region_sets_optional();
 }
 
 void ConcurrentMark::completeCleanup() {
-  // A full collection intervened.
   if (has_aborted()) return;
 
-  int first = 0;
-  int last = (int)MAX2(ParallelGCThreads, (size_t)1);
-  for (int t = 0; t < last; t++) {
-    UncleanRegionList* list = &_par_cleanup_thread_state[t]->list;
-    assert(list->well_formed(), "Inv");
-    HeapRegion* hd = list->hd();
-    while (hd != NULL) {
-      // Now finish up the other stuff.
-      hd->rem_set()->clear();
-      HeapRegion* next_hd = hd->next_from_unclean_list();
-      (void)list->pop();
-      assert(list->hd() == next_hd, "how not?");
-      _g1h->put_region_on_unclean_list(hd);
-      if (!hd->isHumongous()) {
-        // Add this to the _free_regions count by 1.
-        _g1h->finish_free_region_work(0, 0, 1, NULL);
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  _cleanup_list.verify_optional();
+  FreeRegionList local_free_list("Local Cleanup List");
+
+  if (G1ConcRegionFreeingVerbose) {
+    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
+                           "cleanup list has "SIZE_FORMAT" entries",
+                           _cleanup_list.length());
+  }
+
+  // No one else should be accessing the _cleanup_list at this point,
+  // so it's not necessary to take any locks.
+  while (!_cleanup_list.is_empty()) {
+    HeapRegion* hr = _cleanup_list.remove_head();
+    assert(hr != NULL, "the list was not empty");
+    hr->rem_set()->clear();
+    local_free_list.add_as_tail(hr);
+
+    // Instead of adding one region at a time to the secondary_free_list,
+    // we accumulate them in the local list and move them a few at a
+    // time. This also cuts down on the number of notify_all() calls
+    // we do during this process. We'll also append the local list when
+    // _cleanup_list is empty (which means we just removed the last
+    // region from the _cleanup_list).
+    if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
+        _cleanup_list.is_empty()) {
+      if (G1ConcRegionFreeingVerbose) {
+        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
+                               "appending "SIZE_FORMAT" entries to the "
+                               "secondary_free_list, clean list still has "
+                               SIZE_FORMAT" entries",
+                               local_free_list.length(),
+                               _cleanup_list.length());
       }
-      hd = list->hd();
-      assert(hd == next_hd, "how not?");
+
+      {
+        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+        g1h->secondary_free_list_add_as_tail(&local_free_list);
+        SecondaryFreeList_lock->notify_all();
+      }
+
+      if (G1StressConcRegionFreeing) {
+        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
+          os::sleep(Thread::current(), (jlong) 1, false);
+        }
+      }
     }
   }
+  assert(local_free_list.is_empty(), "post-condition");
 }
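
The batching described in the comment above trades a little latency for much less lock traffic: regions accumulate in a thread-local list and are appended to the shared secondary_free_list (with a single notify_all()) once every G1SecondaryFreeListAppendLength entries, plus one final flush for the last partial batch. A standalone sketch of the same idiom, assuming std::mutex/std::condition_variable in place of SecondaryFreeList_lock (names illustrative):

#include <condition_variable>
#include <cstddef>
#include <deque>
#include <mutex>

static std::mutex              secondary_lock;
static std::condition_variable secondary_cv;
static std::deque<int>         secondary_free_list;  // ints stand in for HeapRegion*
static const size_t kAppendLength = 5;                // cf. G1SecondaryFreeListAppendLength

void complete_cleanup_sketch(std::deque<int>& cleanup_list) {
  std::deque<int> local_free_list;
  while (!cleanup_list.empty()) {
    local_free_list.push_back(cleanup_list.front());  // accumulate without locking
    cleanup_list.pop_front();
    // Flush a full batch, and always flush whatever is left at the end.
    if (local_free_list.size() % kAppendLength == 0 || cleanup_list.empty()) {
      std::lock_guard<std::mutex> g(secondary_lock);
      secondary_free_list.insert(secondary_free_list.end(),
                                 local_free_list.begin(), local_free_list.end());
      local_free_list.clear();
      secondary_cv.notify_all();                      // one wake-up per batch
    }
  }
}

Waiters on the condition (region allocation, later in g1CollectedHeap.cpp) therefore see regions arrive a few at a time instead of all at once when cleanup finishes.
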
 
-
-class G1CMIsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
- public:
-  G1CMIsAliveClosure(G1CollectedHeap* g1) :
-    _g1(g1)
-  {}
-
-  void do_object(oop obj) {
-    assert(false, "not to be invoked");
-  }
-  bool do_object_b(oop obj) {
-    HeapWord* addr = (HeapWord*)obj;
-    return addr != NULL &&
-           (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
-  }
-};
+bool G1CMIsAliveClosure::do_object_b(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
+  return addr != NULL &&
+         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
+}
 
 class G1CMKeepAliveClosure: public OopClosure {
   G1CollectedHeap* _g1;
@@ -1896,16 +1912,15 @@
   rp->setup_policy(clear_all_soft_refs);
   assert(_markStack.isEmpty(), "mark stack should be empty");
 
-  G1CMIsAliveClosure   g1IsAliveClosure  (g1h);
-  G1CMKeepAliveClosure g1KeepAliveClosure(g1h, this, nextMarkBitMap());
+  G1CMIsAliveClosure   g1_is_alive(g1h);
+  G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
   G1CMDrainMarkingStackClosure
-    g1DrainMarkingStackClosure(nextMarkBitMap(), &_markStack,
-                               &g1KeepAliveClosure);
+    g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
 
   // XXXYYY  Also: copy the parallel ref processing code from CMS.
-  rp->process_discovered_references(&g1IsAliveClosure,
-                                    &g1KeepAliveClosure,
-                                    &g1DrainMarkingStackClosure,
+  rp->process_discovered_references(&g1_is_alive,
+                                    &g1_keep_alive,
+                                    &g1_drain_mark_stack,
                                     NULL);
   assert(_markStack.overflow() || _markStack.isEmpty(),
          "mark stack should be empty (unless it overflowed)");
@@ -1918,8 +1933,8 @@
   assert(!rp->discovery_enabled(), "should have been disabled");
 
   // Now clean up stale oops in SymbolTable and StringTable
-  SymbolTable::unlink(&g1IsAliveClosure);
-  StringTable::unlink(&g1IsAliveClosure);
+  SymbolTable::unlink(&g1_is_alive);
+  StringTable::unlink(&g1_is_alive);
 }
 
 void ConcurrentMark::swapMarkBitMaps() {
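
For context on the three closures passed to process_discovered_references() above: is_alive decides whether a referent is independently reachable, keep_alive marks a referent that must be retained, and the drain closure empties the mark stack that keep_alive grows. A deliberately simplified sketch of how they cooperate (the real ReferenceProcessor also handles per-kind policy for soft/weak/final/phantom references; everything here is illustrative):

#include <functional>
#include <vector>

struct Ref { void* referent; bool cleared = false; };

void process_refs_sketch(std::vector<Ref>& discovered,
                         const std::function<bool(void*)>& is_alive,
                         const std::function<bool(const Ref&)>& policy_retains,
                         const std::function<void(void*)>& keep_alive,
                         const std::function<void()>& drain_mark_stack) {
  for (Ref& r : discovered) {
    if (is_alive(r.referent)) continue;  // independently reachable: drop the ref
    if (policy_retains(r)) {
      keep_alive(r.referent);            // e.g. a soft ref kept: mark the referent
    } else {
      r.cleared = true;                  // clear it (and later enqueue it)
    }
  }
  drain_mark_stack();                    // trace everything keep_alive pushed
}
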
@@ -2907,9 +2922,9 @@
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T* p) {
-    assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
-    assert(!_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(),
-           "invariant");
+    assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
+    assert(!_g1h->is_on_free_list(
+                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");
 
     oop obj = oopDesc::load_decode_heap_oop(p);
     if (_cm->verbose_high())
@@ -3129,8 +3144,8 @@
 void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
-  assert(!_g1h->heap_region_containing(objAddr)->is_on_free_list(),
-         "invariant");
+  assert(!_g1h->is_on_free_list(
+              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
   assert(!_g1h->is_obj_ill(obj), "invariant");
   assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
 
@@ -3375,8 +3390,8 @@
                                (void*) obj);
 
       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
-      assert(!_g1h->heap_region_containing(obj)->is_on_free_list(),
-             "invariant");
+      assert(!_g1h->is_on_free_list(
+                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
 
       scan_object(obj);
 
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
 
-#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
 #include "utilities/taskqueue.hpp"
 
 class G1CollectedHeap;
@@ -33,6 +33,25 @@
 typedef GenericTaskQueue<oop>            CMTaskQueue;
 typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
 
+// Closure used by CM during concurrent reference discovery
+// and reference processing (during remarking) to determine
+// if a particular object is alive. It is primarily used
+// to determine if referents of discovered reference objects
+// are alive. An instance is also embedded into the
+// reference processor as the _is_alive_non_header field.
+class G1CMIsAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+ public:
+  G1CMIsAliveClosure(G1CollectedHeap* g1) :
+    _g1(g1)
+  {}
+
+  void do_object(oop obj) {
+    ShouldNotCallThis();
+  }
+  bool do_object_b(oop obj);
+};
+
 // A generic CM bit map.  This is essentially a wrapper around the BitMap
 // class, with one bit per (1<<_shifter) HeapWords.
 
@@ -350,13 +369,7 @@
   double                _cleanup_sleep_factor;
   double                _cleanup_task_overhead;
 
-  // Stuff related to age cohort processing.
-  struct ParCleanupThreadState {
-    char _pre[64];
-    UncleanRegionList list;
-    char _post[64];
-  };
-  ParCleanupThreadState** _par_cleanup_thread_state;
+  FreeRegionList        _cleanup_list;
 
   // CMS marking support structures
   CMBitMap                _markBitMap1;
@@ -465,6 +478,10 @@
   // prints all gathered CM-related statistics
   void print_stats();
 
+  bool cleanup_list_is_empty() {
+    return _cleanup_list.is_empty();
+  }
+
   // accessor methods
   size_t parallel_marking_threads() { return _parallel_marking_threads; }
   double sleep_factor()             { return _sleep_factor; }
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,8 +95,8 @@
   _vtime_start = os::elapsedVTime();
   wait_for_universe_init();
 
-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1_policy = g1->g1_policy();
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1_policy = g1h->g1_policy();
   G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
   Thread *current_thread = Thread::current();
 
@@ -119,7 +119,7 @@
       if (!g1_policy->in_young_gc_mode()) {
         // this ensures the flag is not set if we bail out of the marking
         // cycle; normally the flag is cleared immediately after cleanup
-        g1->set_marking_complete();
+        g1h->set_marking_complete();
 
         if (g1_policy->adaptive_young_list_length()) {
           double now = os::elapsedTime();
@@ -228,10 +228,20 @@
         VM_CGC_Operation op(&cl_cl, verbose_str);
         VMThread::execute(&op);
       } else {
-        G1CollectedHeap::heap()->set_marking_complete();
+        g1h->set_marking_complete();
       }
 
-      if (!cm()->has_aborted()) {
+      // Check if cleanup set the free_regions_coming flag. If it
+      // hasn't, we can just skip the next step.
+      if (g1h->free_regions_coming()) {
+        // The following will finish freeing up any regions that we
+        // found to be empty during cleanup. We'll do this part
+        // without joining the suspendible set. If an evacuation pause
+        // takes place, then we would carry on freeing regions in
+        // case they are needed by the pause. If a Full GC takes
+        // place, it would wait for us to process the regions
+        // reclaimed by cleanup.
+
         double cleanup_start_sec = os::elapsedTime();
         if (PrintGC) {
           gclog_or_tty->date_stamp(PrintGCDateStamps);
@@ -240,23 +250,22 @@
         }
 
         // Now do the remainder of the cleanup operation.
-        _sts.join();
         _cm->completeCleanup();
-        if (!cm()->has_aborted()) {
-          g1_policy->record_concurrent_mark_cleanup_completed();
+        g1_policy->record_concurrent_mark_cleanup_completed();
 
-          double cleanup_end_sec = os::elapsedTime();
-          if (PrintGC) {
-            gclog_or_tty->date_stamp(PrintGCDateStamps);
-            gclog_or_tty->stamp(PrintGCTimeStamps);
-            gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
-                                   cleanup_end_sec - cleanup_start_sec);
-          }
+        double cleanup_end_sec = os::elapsedTime();
+        if (PrintGC) {
+          gclog_or_tty->date_stamp(PrintGCDateStamps);
+          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
+                                 cleanup_end_sec - cleanup_start_sec);
         }
-        _sts.leave();
+
+        // We're done: no more free regions coming.
+        g1h->reset_free_regions_coming();
       }
-      // We're done: no more unclean regions coming.
-      G1CollectedHeap::heap()->set_unclean_regions_coming(false);
+      guarantee(cm()->cleanup_list_is_empty(),
+                "at this point there should be no regions on the cleanup list");
 
       if (cm()->has_aborted()) {
         if (PrintGC) {
@@ -277,7 +286,9 @@
     // completed. This will also notify the FullGCCount_lock in case a
     // Java thread is waiting for a full GC to happen (e.g., it
     // called System.gc() with +ExplicitGCInvokesConcurrent).
-    g1->increment_full_collections_completed(true /* outer */);
+    _sts.join();
+    g1h->increment_full_collections_completed(true /* concurrent */);
+    _sts.leave();
   }
   assert(_should_terminate, "just checking");
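
The free_regions_coming flag that this file now drives is a small handshake: cleanup sets it while empty regions are still being moved to the secondary free list, the code above resets it once completeCleanup() finishes, and a consumer that needs many regions at once waits for it to clear. Stripped of HotSpot's Monitor machinery, the protocol amounts to the following (illustrative sketch, not the G1CollectedHeap API):

#include <condition_variable>
#include <mutex>

class FreeRegionsComing {
  std::mutex              _m;
  std::condition_variable _cv;
  bool                    _coming = false;
 public:
  void set()   { std::lock_guard<std::mutex> g(_m); _coming = true; }
  void reset() {                      // cf. reset_free_regions_coming()
    std::lock_guard<std::mutex> g(_m);
    _coming = false;
    _cv.notify_all();                 // release anyone in wait_while_coming()
  }
  void wait_while_coming() {          // cf. wait_while_free_regions_coming()
    std::unique_lock<std::mutex> l(_m);
    _cv.wait(l, [this] { return !_coming; });
  }
};
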
 
--- a/src/share/vm/gc_implementation/g1/concurrentZFThread.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,194 +0,0 @@
-/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/g1/concurrentZFThread.hpp"
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
-#include "memory/space.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "utilities/copy.hpp"
-
-// ======= Concurrent Zero-Fill Thread ========
-
-// The CM thread is created when the G1 garbage collector is used
-
-int ConcurrentZFThread::_region_allocs = 0;
-int ConcurrentZFThread::_sync_zfs = 0;
-int ConcurrentZFThread::_zf_waits = 0;
-int ConcurrentZFThread::_regions_filled = 0;
-
-ConcurrentZFThread::ConcurrentZFThread() :
-  ConcurrentGCThread()
-{
-  create_and_start();
-}
-
-void ConcurrentZFThread::wait_for_ZF_completed(HeapRegion* hr) {
-  assert(ZF_mon->owned_by_self(), "Precondition.");
-  note_zf_wait();
-  while (hr->zero_fill_state() == HeapRegion::ZeroFilling) {
-    ZF_mon->wait(Mutex::_no_safepoint_check_flag);
-  }
-}
-
-void ConcurrentZFThread::processHeapRegion(HeapRegion* hr) {
-  assert(!Universe::heap()->is_gc_active(),
-         "This should not happen during GC.");
-  assert(hr != NULL, "Precondition");
-  // These are unlocked reads, but if this test is successful, then no
-  // other thread will attempt this zero filling.  Only a GC thread can
-  // modify the ZF state of a region whose state is zero-filling, and this
-  // should only happen while the ZF thread is locking out GC.
-  if (hr->zero_fill_state() == HeapRegion::ZeroFilling
-      && hr->zero_filler() == Thread::current()) {
-    assert(hr->top() == hr->bottom(), "better be empty!");
-    assert(!hr->isHumongous(), "Only free regions on unclean list.");
-    Copy::fill_to_words(hr->bottom(), hr->capacity()/HeapWordSize);
-    note_region_filled();
-  }
-}
-
-void ConcurrentZFThread::run() {
-  initialize_in_thread();
-  Thread* thr_self = Thread::current();
-  _vtime_start = os::elapsedVTime();
-  wait_for_universe_init();
-
-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  _sts.join();
-  while (!_should_terminate) {
-    _sts.leave();
-
-    {
-      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-
-      // This local variable will hold a region being zero-filled.  This
-      // region will neither be on the unclean or zero-filled lists, and
-      // will not be available for allocation; thus, we might have an
-      // allocation fail, causing a full GC, because of this, but this is a
-      // price we will pay.  (In future, we might want to make the fact
-      // that there's a region being zero-filled apparent to the G1 heap,
-      // which could then wait for it in this extreme case...)
-      HeapRegion* to_fill;
-
-      while (!g1->should_zf()
-             || (to_fill = g1->pop_unclean_region_list_locked()) == NULL)
-        ZF_mon->wait(Mutex::_no_safepoint_check_flag);
-      while (to_fill->zero_fill_state() == HeapRegion::ZeroFilling)
-        ZF_mon->wait(Mutex::_no_safepoint_check_flag);
-
-      // So now to_fill is non-NULL and is not ZeroFilling.  It might be
-      // Allocated or ZeroFilled.  (The latter could happen if this thread
-      // starts the zero-filling of a region, but a GC intervenes and
-      // pushes new regions needing on the front of the filling on the
-      // front of the list.)
-
-      switch (to_fill->zero_fill_state()) {
-      case HeapRegion::Allocated:
-        to_fill = NULL;
-        break;
-
-      case HeapRegion::NotZeroFilled:
-        to_fill->set_zero_fill_in_progress(thr_self);
-
-        ZF_mon->unlock();
-        _sts.join();
-        processHeapRegion(to_fill);
-        _sts.leave();
-        ZF_mon->lock_without_safepoint_check();
-
-        if (to_fill->zero_fill_state() == HeapRegion::ZeroFilling
-            && to_fill->zero_filler() == thr_self) {
-          to_fill->set_zero_fill_complete();
-          (void)g1->put_free_region_on_list_locked(to_fill);
-        }
-        break;
-
-      case HeapRegion::ZeroFilled:
-        (void)g1->put_free_region_on_list_locked(to_fill);
-        break;
-
-      case HeapRegion::ZeroFilling:
-        ShouldNotReachHere();
-        break;
-      }
-    }
-    _vtime_accum = (os::elapsedVTime() - _vtime_start);
-    _sts.join();
-  }
-  _sts.leave();
-
-  assert(_should_terminate, "just checking");
-  terminate();
-}
-
-bool ConcurrentZFThread::offer_yield() {
-  if (_sts.should_yield()) {
-    _sts.yield("Concurrent ZF");
-    return true;
-  } else {
-    return false;
-  }
-}
-
-void ConcurrentZFThread::stop() {
-  // it is ok to take late safepoints here, if needed
-  MutexLockerEx mu(Terminator_lock);
-  _should_terminate = true;
-  while (!_has_terminated) {
-    Terminator_lock->wait();
-  }
-}
-
-void ConcurrentZFThread::print() const {
-  print_on(tty);
-}
-
-void ConcurrentZFThread::print_on(outputStream* st) const {
-  st->print("\"G1 Concurrent Zero-Fill Thread\" ");
-  Thread::print_on(st);
-  st->cr();
-}
-
-
-double ConcurrentZFThread::_vtime_accum;
-
-void ConcurrentZFThread::print_summary_info() {
-  gclog_or_tty->print("\nConcurrent Zero-Filling:\n");
-  gclog_or_tty->print("  Filled %d regions, used %5.2fs.\n",
-                      _regions_filled,
-                      vtime_accum());
-  gclog_or_tty->print("  Of %d region allocs, %d (%5.2f%%) required sync ZF,\n",
-                      _region_allocs, _sync_zfs,
-                      (_region_allocs > 0 ?
-                       (float)_sync_zfs/(float)_region_allocs*100.0 :
-                       0.0));
-  gclog_or_tty->print("     and %d (%5.2f%%) required a ZF wait.\n",
-                      _zf_waits,
-                      (_region_allocs > 0 ?
-                       (float)_zf_waits/(float)_region_allocs*100.0 :
-                       0.0));
-
-}
--- a/src/share/vm/gc_implementation/g1/concurrentZFThread.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
-
-#include "gc_implementation/shared/concurrentGCThread.hpp"
-
-// The Concurrent ZF Thread.  Performs concurrent zero-filling.
-
-class ConcurrentZFThread: public ConcurrentGCThread {
-  friend class VMStructs;
-  friend class ZeroFillRegionClosure;
-
- private:
-
-  // Zero fill the heap region.
-  void processHeapRegion(HeapRegion* r);
-
-  // Stats
-  //   Allocation (protected by heap lock).
-  static int _region_allocs;  // Number of regions allocated
-  static int _sync_zfs;       //   Synchronous zero-fills +
-  static int _zf_waits;      //   Wait for conc zero-fill completion.
-
-  // Number of regions CFZ thread fills.
-  static int _regions_filled;
-
-  double _vtime_start;  // Initial virtual time.
-
-  // These are static because the "print_summary_info" method is, and
-  // it currently assumes there is only one ZF thread.  We'll change when
-  // we need to.
-  static double _vtime_accum;  // Initial virtual time.
-  static double vtime_accum() { return _vtime_accum; }
-
-  // Offer yield for GC.  Returns true if yield occurred.
-  bool offer_yield();
-
- public:
-  // Constructor
-  ConcurrentZFThread();
-
-  // Main loop.
-  virtual void run();
-
-  // Printing
-  void print_on(outputStream* st) const;
-  void print() const;
-
-  // Waits until "r" has been zero-filled.  Requires caller to hold the
-  // ZF_mon.
-  static void wait_for_ZF_completed(HeapRegion* r);
-
-  // Get or clear the current unclean region.  Should be done
-  // while holding the ZF_needed_mon lock.
-
-  // shutdown
-  void stop();
-
-  // Stats
-  static void note_region_alloc() {_region_allocs++; }
-  static void note_sync_zfs() { _sync_zfs++; }
-  static void note_zf_wait() { _zf_waits++; }
-  static void note_region_filled() { _regions_filled++; }
-
-  static void print_summary_info();
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,7 +222,7 @@
 
 // Action_mark - update the BOT for the block [blk_start, blk_end).
 //               Current typical use is for splitting a block.
-// Action_single - udpate the BOT for an allocation.
+// Action_single - update the BOT for an allocation.
 // Action_verify - BOT verification.
 void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                            HeapWord* blk_end,
@@ -331,47 +331,6 @@
   do_block_internal(blk_start, blk_end, Action_mark);
 }
 
-void G1BlockOffsetArray::join_blocks(HeapWord* blk1, HeapWord* blk2) {
-  HeapWord* blk1_start = Universe::heap()->block_start(blk1);
-  HeapWord* blk2_start = Universe::heap()->block_start(blk2);
-  assert(blk1 == blk1_start && blk2 == blk2_start,
-         "Must be block starts.");
-  assert(blk1 + _sp->block_size(blk1) == blk2, "Must be contiguous.");
-  size_t blk1_start_index = _array->index_for(blk1);
-  size_t blk2_start_index = _array->index_for(blk2);
-  assert(blk1_start_index <= blk2_start_index, "sanity");
-  HeapWord* blk2_card_start = _array->address_for_index(blk2_start_index);
-  if (blk2 == blk2_card_start) {
-    // blk2 starts a card.  Does blk1 start on the prevous card, or futher
-    // back?
-    assert(blk1_start_index < blk2_start_index, "must be lower card.");
-    if (blk1_start_index + 1 == blk2_start_index) {
-      // previous card; new value for blk2 card is size of blk1.
-      _array->set_offset_array(blk2_start_index, (u_char) _sp->block_size(blk1));
-    } else {
-      // Earlier card; go back a card.
-      _array->set_offset_array(blk2_start_index, N_words);
-    }
-  } else {
-    // blk2 does not start a card.  Does it cross a card?  If not, nothing
-    // to do.
-    size_t blk2_end_index =
-      _array->index_for(blk2 + _sp->block_size(blk2) - 1);
-    assert(blk2_end_index >= blk2_start_index, "sanity");
-    if (blk2_end_index > blk2_start_index) {
-      // Yes, it crosses a card.  The value for the next card must change.
-      if (blk1_start_index + 1 == blk2_start_index) {
-        // previous card; new value for second blk2 card is size of blk1.
-        _array->set_offset_array(blk2_start_index + 1,
-                                 (u_char) _sp->block_size(blk1));
-      } else {
-        // Earlier card; go back a card.
-        _array->set_offset_array(blk2_start_index + 1, N_words);
-      }
-    }
-  }
-}
-
 HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
   assert(_bottom <= addr && addr < _end,
          "addr must be covered by this Array");
@@ -580,15 +539,50 @@
 #endif
 }
 
-void
-G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
-  assert(_end ==  new_end, "_end should have already been updated");
+bool
+G1BlockOffsetArray::verify_for_object(HeapWord* obj_start,
+                                      size_t word_size) const {
+  size_t first_card = _array->index_for(obj_start);
+  size_t last_card = _array->index_for(obj_start + word_size - 1);
+  if (!_array->is_card_boundary(obj_start)) {
+    // If the object is not on a card boundary the BOT entry of the
+    // first card should point to another object so we should not
+    // check that one.
+    first_card += 1;
+  }
+  for (size_t card = first_card; card <= last_card; card += 1) {
+    HeapWord* card_addr = _array->address_for_index(card);
+    HeapWord* block_start = block_start_const(card_addr);
+    if (block_start != obj_start) {
+      gclog_or_tty->print_cr("block start: "PTR_FORMAT" is incorrect - "
+                             "card index: "SIZE_FORMAT" "
+                             "card addr: "PTR_FORMAT" BOT entry: %u "
+                             "obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
+                             "cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
+                             block_start, card, card_addr,
+                             _array->offset_array(card),
+                             obj_start, word_size, first_card, last_card);
+      return false;
+    }
+  }
+  return true;
+}
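
verify_for_object() above rests on the two index mappings of the block offset table: index_for() maps an address to the card covering it, and address_for_index() maps a card back to its first covered address; every card the object spans, except a leading partial card, must resolve back to obj_start. The arithmetic in isolation, assuming a 512-byte card (a typical value, not taken from this change):

#include <cstddef>
#include <cstdint>

static const size_t kCardShift = 9;   // assumed: 512-byte cards

size_t index_for(const uint8_t* base, const void* addr) {
  return (size_t)((const uint8_t*)addr - base) >> kCardShift;
}

const uint8_t* address_for_index(const uint8_t* base, size_t card) {
  return base + (card << kCardShift);
}

// For an object at obj_start spanning obj_bytes, the cards to check run from
// index_for(base, obj_start) to index_for(base, obj_start + obj_bytes - 1);
// when obj_start is not card-aligned, the first card's entry describes the
// previous object and is skipped, exactly as the loop above does.
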
 
-  // The first BOT entry should have offset 0.
-  _array->set_offset_array(_array->index_for(_bottom), 0);
-  // The rest should point to the first one.
-  set_remainder_to_point_to_start(_bottom + N_words, new_end);
+#ifndef PRODUCT
+void
+G1BlockOffsetArray::print_on(outputStream* out) {
+  size_t from_index = _array->index_for(_bottom);
+  size_t to_index = _array->index_for(_end);
+  out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
+                "cards ["SIZE_FORMAT","SIZE_FORMAT")",
+                _bottom, _end, from_index, to_index);
+  for (size_t i = from_index; i < to_index; ++i) {
+    out->print_cr("  entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
+                  i, _array->address_for_index(i),
+                  (uint) _array->offset_array(i));
+  }
 }
+#endif // !PRODUCT
 
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArrayContigSpace
@@ -641,10 +635,20 @@
 }
 
 void
-G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
-  G1BlockOffsetArray::set_for_starts_humongous(new_end);
+G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
+  assert(new_top <= _end, "_end should have already been updated");
+
+  // The first BOT entry should have offset 0.
+  zero_bottom_entry();
+  initialize_threshold();
+  alloc_block(_bottom, new_top);
+}
 
-  // Make sure _next_offset_threshold and _next_offset_index point to new_end.
-  _next_offset_threshold = new_end;
-  _next_offset_index     = _array->index_for(new_end);
+#ifndef PRODUCT
+void
+G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
+  G1BlockOffsetArray::print_on(out);
+  out->print_cr("  next offset threshold: "PTR_FORMAT, _next_offset_threshold);
+  out->print_cr("  next offset index:     "SIZE_FORMAT, _next_offset_index);
 }
+#endif // !PRODUCT
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -352,11 +352,6 @@
   // The following methods are useful and optimized for a
   // general, non-contiguous space.
 
-  // The given arguments are required to be the starts of adjacent ("blk1"
-  // before "blk2") well-formed blocks covered by "this".  After this call,
-  // they should be considered to form one block.
-  virtual void join_blocks(HeapWord* blk1, HeapWord* blk2);
-
   // Given a block [blk_start, blk_start + full_blk_size), and
   // a left_blk_size < full_blk_size, adjust the BOT to show two
   // blocks [blk_start, blk_start + left_blk_size) and
@@ -429,6 +424,12 @@
     verify_single_block(blk, blk + size);
   }
 
+  // Used by region verification. Checks that the contents of the
+  // BOT reflect that there's a single object that spans the address
+  // range [obj_start, obj_start + word_size); returns true if this is
+  // the case, returns false if it's not.
+  bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
+
   // Verify that the given block is before _unallocated_block
   inline void verify_not_unallocated(HeapWord* blk_start,
                                      HeapWord* blk_end) const {
@@ -444,7 +445,7 @@
 
   void check_all_cards(size_t left_card, size_t right_card) const;
 
-  virtual void set_for_starts_humongous(HeapWord* new_end);
+  virtual void print_on(outputStream* out) PRODUCT_RETURN;
 };
 
 // A subtype of BlockOffsetArray that takes advantage of the fact
@@ -494,7 +495,9 @@
   HeapWord* block_start_unsafe(const void* addr);
   HeapWord* block_start_unsafe_const(const void* addr) const;
 
-  virtual void set_for_starts_humongous(HeapWord* new_end);
+  void set_for_starts_humongous(HeapWord* new_top);
+
+  virtual void print_on(outputStream* out) PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,6 @@
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
-#include "gc_implementation/g1/concurrentZFThread.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1MarkSweep.hpp"
@@ -425,11 +424,9 @@
 
 void G1CollectedHeap::stop_conc_gc_threads() {
   _cg1r->stop();
-  _czft->stop();
   _cmThread->stop();
 }
 
-
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
@@ -481,49 +478,92 @@
 
 // Private methods.
 
-// Finds a HeapRegion that can be used to allocate a given size of block.
-
-
-HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
-                                                 bool do_expand,
-                                                 bool zero_filled) {
-  ConcurrentZFThread::note_region_alloc();
-  HeapRegion* res = alloc_free_region_from_lists(zero_filled);
+HeapRegion*
+G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) {
+  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
+    if (!_secondary_free_list.is_empty()) {
+      if (G1ConcRegionFreeingVerbose) {
+        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                               "secondary_free_list has "SIZE_FORMAT" entries",
+                               _secondary_free_list.length());
+      }
+      // It looks as if there are free regions available on the
+      // secondary_free_list. Let's move them to the free_list and try
+      // again to allocate from it.
+      append_secondary_free_list();
+
+      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
+             "empty we should have moved at least one entry to the free_list");
+      HeapRegion* res = _free_list.remove_head();
+      if (G1ConcRegionFreeingVerbose) {
+        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                               "allocated "HR_FORMAT" from secondary_free_list",
+                               HR_FORMAT_PARAMS(res));
+      }
+      return res;
+    }
+
+    // Wait here until we get notified, either when (a) there are no
+    // more free regions coming or (b) some regions have been moved onto
+    // the secondary_free_list.
+    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
+  }
+
+  if (G1ConcRegionFreeingVerbose) {
+    gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                           "could not allocate from secondary_free_list");
+  }
+  return NULL;
+}
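
new_region_try_secondary_free_list() above is a textbook guarded wait: keep draining the secondary list while the producer may still add to it, block when it is momentarily empty, and give up only once it is empty and free_regions_coming() is false. As a standalone sketch (std types standing in for FreeRegionList and SecondaryFreeList_lock; all names illustrative):

#include <condition_variable>
#include <deque>
#include <mutex>
#include <optional>

std::optional<int> try_secondary_sketch(std::deque<int>& secondary,
                                        const bool& coming,
                                        std::mutex& m,
                                        std::condition_variable& cv) {
  std::unique_lock<std::mutex> l(m);
  while (!secondary.empty() || coming) {
    if (!secondary.empty()) {
      int region = secondary.front();  // G1 moves the whole batch to _free_list;
      secondary.pop_front();           // taking one element is enough here
      return region;
    }
    cv.wait(l);  // woken when (a) no more regions are coming or (b) a batch lands
  }
  return std::nullopt;                 // list empty and nothing more coming
}
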
+
+HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
+                                             bool do_expand) {
+  assert(!isHumongous(word_size) ||
+                                  word_size <= (size_t) HeapRegion::GrainWords,
+         "the only time we use this to allocate a humongous region is "
+         "when we are allocating a single humongous region");
+
+  HeapRegion* res;
+  if (G1StressConcRegionFreeing) {
+    if (!_secondary_free_list.is_empty()) {
+      if (G1ConcRegionFreeingVerbose) {
+        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                               "forced to look at the secondary_free_list");
+      }
+      res = new_region_try_secondary_free_list(word_size);
+      if (res != NULL) {
+        return res;
+      }
+    }
+  }
+  res = _free_list.remove_head_or_null();
+  if (res == NULL) {
+    if (G1ConcRegionFreeingVerbose) {
+      gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
+                             "res == NULL, trying the secondary_free_list");
+    }
+    res = new_region_try_secondary_free_list(word_size);
+  }
   if (res == NULL && do_expand) {
     expand(word_size * HeapWordSize);
-    res = alloc_free_region_from_lists(zero_filled);
-    assert(res == NULL ||
-           (!res->isHumongous() &&
-            (!zero_filled ||
-             res->zero_fill_state() == HeapRegion::Allocated)),
-           "Alloc Regions must be zero filled (and non-H)");
+    res = _free_list.remove_head_or_null();
   }
   if (res != NULL) {
-    if (res->is_empty()) {
-      _free_regions--;
-    }
-    assert(!res->isHumongous() &&
-           (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated),
-           err_msg("Non-young alloc Regions must be zero filled (and non-H):"
-                   " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d",
-                   res->isHumongous(), zero_filled, res->zero_fill_state()));
-    assert(!res->is_on_unclean_list(),
-           "Alloc Regions must not be on the unclean list");
     if (G1PrintHeapRegions) {
-      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
-                             "top "PTR_FORMAT,
-                             res->hrs_index(), res->bottom(), res->end(), res->top());
+      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
+                             "top "PTR_FORMAT, res->hrs_index(),
+                             res->bottom(), res->end(), res->top());
     }
   }
   return res;
 }
 
-HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
-                                                         size_t word_size,
-                                                         bool zero_filled) {
+HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
+                                                 size_t word_size) {
   HeapRegion* alloc_region = NULL;
   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
-    alloc_region = newAllocRegion_work(word_size, true, zero_filled);
+    alloc_region = new_region_work(word_size, true /* do_expand */);
     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
       alloc_region->set_survivor();
     }
@@ -534,81 +574,220 @@
   return alloc_region;
 }
 
+int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
+                                                       size_t word_size) {
+  int first = -1;
+  if (num_regions == 1) {
+    // Only one region to allocate, no need to go through the slower
+    // path. The caller will attempt the expansion if this fails, so
+    // let's not try to expand here too.
+    HeapRegion* hr = new_region_work(word_size, false /* do_expand */);
+    if (hr != NULL) {
+      first = hr->hrs_index();
+    } else {
+      first = -1;
+    }
+  } else {
+    // We can't allocate humongous regions while cleanupComplete() is
+    // running, since some of the regions we find to be empty might not
+    // yet be added to the free list and it is not straightforward to
+    // know which list they are on so that we can remove them. Note
+    // that we only need to do this if we need to allocate more than
+    // one region to satisfy the current humongous allocation
+    // request. If we are only allocating one region we use the common
+    // region allocation code (see above).
+    wait_while_free_regions_coming();
+    append_secondary_free_list_if_not_empty();
+
+    if (free_regions() >= num_regions) {
+      first = _hrs->find_contiguous(num_regions);
+      if (first != -1) {
+        for (int i = first; i < first + (int) num_regions; ++i) {
+          HeapRegion* hr = _hrs->at(i);
+          assert(hr->is_empty(), "sanity");
+          assert(is_on_free_list(hr), "sanity");
+          hr->set_pending_removal(true);
+        }
+        _free_list.remove_all_pending(num_regions);
+      }
+    }
+  }
+  return first;
+}
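
humongous_obj_allocate_find_first() above reserves num_regions adjacent regions in two steps: find_contiguous() locates a run of empty regions, then each is flagged pending-removal so remove_all_pending() can take them off the free list in one pass. The search itself is a run-length scan; a toy version over an array of free flags (find_contiguous_sketch is illustrative, not the HeapRegionSeq API):

#include <cstddef>
#include <vector>

// Returns the index of the first of num contiguous free slots, or -1.
int find_contiguous_sketch(const std::vector<bool>& free, size_t num) {
  size_t run = 0;
  for (size_t i = 0; i < free.size(); ++i) {
    run = free[i] ? run + 1 : 0;       // extend or restart the current run
    if (run == num) {
      return (int)(i - num + 1);       // run ends at i, so it started here
    }
  }
  return -1;                           // no run long enough
}
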
+
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
-  assert_heap_locked_or_at_safepoint();
-  assert(regions_accounted_for(), "Region leakage!");
-
-  // We can't allocate humongous regions while cleanupComplete is
-  // running, since some of the regions we find to be empty might not
-  // yet be added to the unclean list. If we're already at a
-  // safepoint, this call is unnecessary, not to mention wrong.
-  if (!SafepointSynchronize::is_at_safepoint()) {
-    wait_for_cleanup_complete();
-  }
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
+
+  verify_region_sets_optional();
 
   size_t num_regions =
          round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
-
-  // Special case if < one region???
-
-  // Remember the ft size.
   size_t x_size = expansion_regions();
-
-  HeapWord* res = NULL;
-  bool eliminated_allocated_from_lists = false;
-
-  // Can the allocation potentially fit in the free regions?
-  if (free_regions() >= num_regions) {
-    res = _hrs->obj_allocate(word_size);
-  }
-  if (res == NULL) {
-    // Try expansion.
-    size_t fs = _hrs->free_suffix();
+  size_t fs = _hrs->free_suffix();
+  int first = humongous_obj_allocate_find_first(num_regions, word_size);
+  if (first == -1) {
+    // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
       expand((num_regions - fs) * HeapRegion::GrainBytes);
-      res = _hrs->obj_allocate(word_size);
-      assert(res != NULL, "This should have worked.");
-    } else {
-      // Expansion won't help.  Are there enough free regions if we get rid
-      // of reservations?
-      size_t avail = free_regions();
-      if (avail >= num_regions) {
-        res = _hrs->obj_allocate(word_size);
-        if (res != NULL) {
-          remove_allocated_regions_from_lists();
-          eliminated_allocated_from_lists = true;
-        }
+      first = humongous_obj_allocate_find_first(num_regions, word_size);
+      assert(first != -1, "this should have worked");
+    }
+  }
+
+  if (first != -1) {
+    // Index of last region in the series + 1.
+    int last = first + (int) num_regions;
+
+    // We need to initialize the region(s) we just discovered. This is
+    // a bit tricky given that it can happen concurrently with
+    // refinement threads refining cards on these regions and
+    // potentially wanting to refine the BOT as they are scanning
+    // those cards (this can happen shortly after a cleanup; see CR
+    // 6991377). So we have to set up the region(s) carefully and in
+    // a specific order.
+
+    // The word size sum of all the regions we will allocate.
+    size_t word_size_sum = num_regions * HeapRegion::GrainWords;
+    assert(word_size <= word_size_sum, "sanity");
+
+    // This will be the "starts humongous" region.
+    HeapRegion* first_hr = _hrs->at(first);
+    // The header of the new object will be placed at the bottom of
+    // the first region.
+    HeapWord* new_obj = first_hr->bottom();
+    // This will be the new end of the first region in the series that
+    // should also match the end of the last region in the series.
+    HeapWord* new_end = new_obj + word_size_sum;
+    // This will be the new top of the first region that will reflect
+    // this allocation.
+    HeapWord* new_top = new_obj + word_size;
+
+    // First, we need to zero the header of the space that we will be
+    // allocating. When we update top further down, some refinement
+    // threads might try to scan the region. By zeroing the header we
+    // ensure that any thread that will try to scan the region will
+    // come across the zero klass word and bail out.
+    //
+    // NOTE: It would not have been correct to have used
+    // CollectedHeap::fill_with_object() and make the space look like
+    // an int array. The thread that is doing the allocation will
+    // later update the object header to a potentially different array
+    // type and, for a very short period of time, the klass and length
+    // fields will be inconsistent. This could cause a refinement
+    // thread to calculate the object size incorrectly.
+    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+    // We will set up the first region as "starts humongous". This
+    // will also update the BOT covering all the regions to reflect
+    // that there is a single object that starts at the bottom of the
+    // first region.
+    first_hr->set_startsHumongous(new_top, new_end);
+
+    // Then, if there are any, we will set up the "continues
+    // humongous" regions.
+    HeapRegion* hr = NULL;
+    for (int i = first + 1; i < last; ++i) {
+      hr = _hrs->at(i);
+      hr->set_continuesHumongous(first_hr);
+    }
+    // If we have "continues humongous" regions (hr != NULL), then the
+    // end of the last one should match new_end.
+    assert(hr == NULL || hr->end() == new_end, "sanity");
+
+    // Up to this point no concurrent thread would have been able to
+    // do any scanning on any region in this series. All the top
+    // fields still point to bottom, so the intersection between
+    // [bottom,top] and [card_start,card_end] will be empty. Before we
+    // update the top fields, we'll do a storestore to make sure that
+    // no thread sees the update to top before the zeroing of the
+    // object header and the BOT initialization.
+    OrderAccess::storestore();
+
+    // Now that the BOT and the object header have been initialized,
+    // we can update top of the "starts humongous" region.
+    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+           "new_top should be in this region");
+    first_hr->set_top(new_top);
+
+    // Now, we will update the top fields of the "continues humongous"
+    // regions. The reason we need to do this is that, otherwise,
+    // these regions would look empty and this will confuse parts of
+    // G1. For example, the code that looks for a consecutive number
+    // of empty regions will consider them empty and try to
+    // re-allocate them. We can extend is_empty() to also include
+    // !continuesHumongous(), but it is easier to just update the top
+    // fields here. The way we set top for all regions (i.e., top ==
+    // end for all regions but the last one, top == new_top for the
+    // last one) is actually used when we will free up the humongous
+    // region in free_humongous_region().
+    hr = NULL;
+    for (int i = first + 1; i < last; ++i) {
+      hr = _hrs->at(i);
+      if ((i + 1) == last) {
+        // last continues humongous region
+        assert(hr->bottom() < new_top && new_top <= hr->end(),
+               "new_top should fall on this region");
+        hr->set_top(new_top);
+      } else {
+        // not last one
+        assert(new_top > hr->end(), "new_top should be above this region");
+        hr->set_top(hr->end());
       }
     }
-  }
-  if (res != NULL) {
-    // Increment by the number of regions allocated.
-    // FIXME: Assumes regions all of size GrainBytes.
-#ifndef PRODUCT
-    mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
-                                           HeapRegion::GrainWords));
-#endif
-    if (!eliminated_allocated_from_lists)
-      remove_allocated_regions_from_lists();
-    _summary_bytes_used += word_size * HeapWordSize;
-    _free_regions -= num_regions;
-    _num_humongous_regions += (int) num_regions;
-  }
-  assert(regions_accounted_for(), "Region Leakage");
-  return res;
+    // If we have continues humongous regions (hr != NULL), then the
+    // end of the last one should match new_end and its top should
+    // match new_top.
+    assert(hr == NULL ||
+           (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+    assert(first_hr->used() == word_size * HeapWordSize, "invariant");
+    _summary_bytes_used += first_hr->used();
+    _humongous_set.add(first_hr);
+
+    return new_obj;
+  }
+
+  verify_region_sets_optional();
+  return NULL;
 }
 
 void
 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
-  // The cleanup operation might update _summary_bytes_used
-  // concurrently with this method. So, right now, if we don't wait
-  // for it to complete, updates to _summary_bytes_used might get
-  // lost. This will be resolved in the near future when the operation
-  // of the free region list is revamped as part of CR 6977804.
-  wait_for_cleanup_complete();
+  // Other threads might still be trying to allocate using CASes out
+  // of the region we are retiring, as they can do so without holding
+  // the Heap_lock. So we first have to make sure that no one else can
+  // allocate in it by doing a maximal allocation. Even if our CAS
+  // attempt fails a few times, we'll succeed sooner or later given
+  // that a failed CAS attempt means that the region is getting close
+  // to being full (someone else succeeded in allocating into it).
+  size_t free_word_size = cur_alloc_region->free() / HeapWordSize;
+
+  // This is the minimum free chunk we can turn into a dummy
+  // object. If the free space falls below this, then no one can
+  // allocate in this region anyway (all allocation requests will be
+  // of a size larger than this) so we won't have to perform the dummy
+  // allocation.
+  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
+
+  while (free_word_size >= min_word_size_to_fill) {
+    HeapWord* dummy =
+      cur_alloc_region->par_allocate_no_bot_updates(free_word_size);
+    if (dummy != NULL) {
+      // If the allocation was successful we should fill in the space.
+      CollectedHeap::fill_with_object(dummy, free_word_size);
+      break;
+    }
+
+    free_word_size = cur_alloc_region->free() / HeapWordSize;
+    // It's also possible that someone else beats us to the
+    // allocation and they fill up the region. In that case, we can
+    // just get out of the loop.
+  }
+  assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill,
+         "sanity");
 
   retire_cur_alloc_region_common(cur_alloc_region);
   assert(_cur_alloc_region == NULL, "post-condition");
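
The retirement loop above closes a region against lock-free allocators by performing a maximal allocation itself: either its CAS wins and the leftover space is formatted as a dummy object, or enough allocations succeed under it that less than min_fill_size() remains and nothing further can fit. A compact sketch of that loop with std::atomic (the region layout and min_fill are assumptions for illustration):

#include <atomic>
#include <cstddef>

struct BumpRegion {
  std::atomic<size_t> top;   // offset of the next free byte
  size_t              end;   // one past the last usable byte
};

void retire_sketch(BumpRegion& r, size_t min_fill) {
  size_t cur = r.top.load();
  while (r.end - cur >= min_fill) {    // anything smaller can't be requested
    if (r.top.compare_exchange_weak(cur, r.end)) {
      // We won: format [cur, end) as a dummy object so the region
      // parses as fully allocated, then stop.
      break;
    }
    // On failure, cur was refreshed: someone else allocated, so the
    // remaining space shrank; re-check the bound and try again.
  }
}
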
@@ -621,7 +800,7 @@
                                                        bool at_safepoint,
                                                        bool do_dirtying,
                                                        bool can_expand) {
-  assert_heap_locked_or_at_safepoint();
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   assert(_cur_alloc_region == NULL,
          "replace_cur_alloc_region_and_allocate() should only be called "
          "after retiring the previous current alloc region");
@@ -632,25 +811,12 @@
          "we are not allowed to expand the young gen");
 
   if (can_expand || !g1_policy()->is_young_list_full()) {
-    if (!at_safepoint) {
-      // The cleanup operation might update _summary_bytes_used
-      // concurrently with this method. So, right now, if we don't
-      // wait for it to complete, updates to _summary_bytes_used might
-      // get lost. This will be resolved in the near future when the
-      // operation of the free region list is revamped as part of
-      // CR 6977804. If we're already at a safepoint, this call is
-      // unnecessary, not to mention wrong.
-      wait_for_cleanup_complete();
-    }
-
-    HeapRegion* new_cur_alloc_region = newAllocRegion(word_size,
-                                                      false /* zero_filled */);
+    HeapRegion* new_cur_alloc_region = new_alloc_region(word_size);
     if (new_cur_alloc_region != NULL) {
       assert(new_cur_alloc_region->is_empty(),
              "the newly-allocated region should be empty, "
              "as right now we only allocate new regions out of the free list");
       g1_policy()->update_region_num(true /* next_is_young */);
-      _summary_bytes_used -= new_cur_alloc_region->used();
       set_region_short_lived_locked(new_cur_alloc_region);
 
       assert(!new_cur_alloc_region->isHumongous(),
@@ -661,27 +827,29 @@
       // young type.
       OrderAccess::storestore();
 
-      // Now allocate out of the new current alloc region. We could
-      // have re-used allocate_from_cur_alloc_region() but its
-      // operation is slightly different to what we need here. First,
-      // allocate_from_cur_alloc_region() is only called outside a
-      // safepoint and will always unlock the Heap_lock if it returns
-      // a non-NULL result. Second, it assumes that the current alloc
-      // region is what's already assigned in _cur_alloc_region. What
-      // we want here is to actually do the allocation first before we
-      // assign the new region to _cur_alloc_region. This ordering is
-      // not currently important, but it will be essential when we
-      // change the code to support CAS allocation in the future (see
-      // CR 6994297).
-      //
-      // This allocate method does BOT updates and we don't need them in
-      // the young generation. This will be fixed in the near future by
-      // CR 6994297.
-      HeapWord* result = new_cur_alloc_region->allocate(word_size);
+      // Now, perform the allocation out of the region we just
+      // allocated. Note that no one else can access that region at
+      // this point (as _cur_alloc_region has not been updated yet),
+      // so we can just go ahead and do the allocation without any
+      // atomics (and we expect this allocation attempt to
+      // succeed). Given that other threads can attempt an allocation
+      // with a CAS and without needing the Heap_lock, if we assigned
+      // the new region to _cur_alloc_region before first allocating
+      // into it other threads might have filled up the new region
+      // before we got a chance to do the allocation ourselves. In
+      // that case, we would have needed to retire the region, grab a
+      // new one, and go through all this again. Allocating out of the
+      // new region before assigning it to _cur_alloc_region avoids
+      // all this.
+      HeapWord* result =
+                     new_cur_alloc_region->allocate_no_bot_updates(word_size);
       assert(result != NULL, "we just allocated out of an empty region "
              "so allocation should have been successful");
       assert(is_in(result), "result should be in the heap");
 
+      // Now make sure that the store to _cur_alloc_region does not
+      // float above the store to top.
+      OrderAccess::storestore();
       _cur_alloc_region = new_cur_alloc_region;
 
       if (!at_safepoint) {
@@ -698,7 +866,7 @@
 
   assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
          "alloc region, it should still be NULL");
-  assert_heap_locked_or_at_safepoint();
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   return NULL;
 }
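The ordering the comments above insist on (set up the region and allocate into it first, only then publish it through _cur_alloc_region) is the standard safe-publication idiom. A sketch in portable C++11, where a single release store plays the role of the storestore barriers (hypothetical names, not the HotSpot API):

    #include <atomic>
    #include <cstddef>

    struct Region {
      char* top;      // plain fields: the region is thread-private here
      char* end;
      bool  is_young;
    };

    std::atomic<Region*> cur_alloc_region{nullptr};

    char* install_and_allocate(Region* fresh, size_t bytes) {
      // 1. Initialize and allocate while no other thread can see the
      //    region: no atomics are needed for these stores.
      fresh->is_young = true;
      char* result = fresh->top;
      fresh->top += bytes;

      // 2. Publish. The release store keeps the stores above from
      //    floating below it, so a thread that loads cur_alloc_region
      //    with acquire semantics sees a fully set-up region.
      cur_alloc_region.store(fresh, std::memory_order_release);
      return result;
    }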
 
@@ -710,6 +878,10 @@
   assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
          "used for humongous allocations");
 
+  // We should only reach here when we were unable to allocate
+  // otherwise. So, we should not have an active current alloc region.
+  assert(_cur_alloc_region == NULL, "current alloc region should be NULL");
+
   // We will loop while succeeded is false, which means that we tried
   // to do a collection, but the VM op did not succeed. So, when we
   // exit the loop, either one of the allocation attempts was
@@ -718,28 +890,8 @@
   for (int try_count = 1; /* we'll return or break */; try_count += 1) {
     bool succeeded = true;
 
-    {
-      // We may have concurrent cleanup working at the time. Wait for
-      // it to complete. In the future we would probably want to make
-      // the concurrent cleanup truly concurrent by decoupling it from
-      // the allocation. This will happen in the near future as part
-      // of CR 6977804 which will revamp the operation of the free
-      // region list. The fact that wait_for_cleanup_complete() will
-      // do a wait() means that we'll give up the Heap_lock. So, it's
-      // possible that when we exit wait_for_cleanup_complete() we
-      // might be able to allocate successfully (since somebody else
-      // might have done a collection meanwhile). So, we'll attempt to
-      // allocate again, just in case. When we make cleanup truly
-      // concurrent with allocation, we should remove this allocation
-      // attempt as it's redundant (we only reach here after an
-      // allocation attempt has been unsuccessful).
-      wait_for_cleanup_complete();
-      HeapWord* result = attempt_allocation(word_size);
-      if (result != NULL) {
-        assert_heap_not_locked();
-        return result;
-      }
-    }
+    // Every time we go around the loop, we should be holding the Heap_lock.
+    assert_heap_locked();
 
     if (GC_locker::is_active_and_needs_gc()) {
       // We are locked out of GC because of the GC locker. We can
@@ -748,7 +900,6 @@
       if (g1_policy()->can_expand_young_list()) {
         // Yes, we are allowed to expand the young gen. Let's try to
         // allocate a new current alloc region.
-
         HeapWord* result =
           replace_cur_alloc_region_and_allocate(word_size,
                                                 false, /* at_safepoint */
@@ -771,20 +922,23 @@
       // rather than causing more, now probably unnecessary, GC attempts.
       JavaThread* jthr = JavaThread::current();
       assert(jthr != NULL, "sanity");
-      if (!jthr->in_critical()) {
-        MutexUnlocker mul(Heap_lock);
-        GC_locker::stall_until_clear();
-
-        // We'll then fall off the end of the ("if GC locker active")
-        // if-statement and retry the allocation further down in the
-        // loop.
-      } else {
+      if (jthr->in_critical()) {
         if (CheckJNICalls) {
           fatal("Possible deadlock due to allocating while"
                 " in jni critical section");
         }
+        // We are returning NULL, so the protocol is that we're still
+        // holding the Heap_lock.
+        assert_heap_locked();
         return NULL;
       }
+
+      Heap_lock->unlock();
+      GC_locker::stall_until_clear();
+
+      // No need to relock the Heap_lock. We'll fall through to the
+      // code below the else-statement, which assumes that we are not
+      // holding the Heap_lock.
     } else {
       // We are not locked out. So, let's try to do a GC. The VM op
       // will retry the allocation before it completes.
@@ -805,11 +959,10 @@
         dirty_young_block(result, word_size);
         return result;
       }
-
-      Heap_lock->lock();
     }
 
-    assert_heap_locked();
+    // Both paths that get us here from above unlock the Heap_lock.
+    assert_heap_not_locked();
 
     // We can reach here when we were unsuccessful in doing a GC,
     // because another thread beat us to it, or because we were locked
@@ -854,7 +1007,7 @@
   // allocation paths that attempt to allocate a humongous object
   // should eventually reach here. Currently, the only paths are from
   // mem_allocate() and attempt_allocation_at_safepoint().
-  assert_heap_locked_or_at_safepoint();
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   assert(isHumongous(word_size), "attempt_allocation_humongous() "
          "should only be used for humongous allocations");
   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
@@ -931,13 +1084,13 @@
     }
   }
 
-  assert_heap_locked_or_at_safepoint();
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   return NULL;
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                            bool expect_null_cur_alloc_region) {
-  assert_at_safepoint();
+  assert_at_safepoint(true /* should_be_vm_thread */);
   assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
          err_msg("the current alloc region was unexpectedly found "
                  "to be non-NULL, cur alloc region: "PTR_FORMAT" "
@@ -948,10 +1101,8 @@
     if (!expect_null_cur_alloc_region) {
       HeapRegion* cur_alloc_region = _cur_alloc_region;
       if (cur_alloc_region != NULL) {
-        // This allocate method does BOT updates and we don't need them in
-        // the young generation. This will be fixed in the near future by
-        // CR 6994297.
-        HeapWord* result = cur_alloc_region->allocate(word_size);
+        // We are at a safepoint, so no need to use the MT-safe version.
+        HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size);
         if (result != NULL) {
           assert(is_in(result), "result should be in the heap");
 
@@ -983,20 +1134,17 @@
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
 
-  Heap_lock->lock();
-
-  // First attempt: try allocating out of the current alloc region or
-  // after replacing the current alloc region.
+  // First attempt: Try allocating out of the current alloc region
+  // using a CAS. If that fails, take the Heap_lock and retry the
+  // allocation, potentially replacing the current alloc region.
   HeapWord* result = attempt_allocation(word_size);
   if (result != NULL) {
     assert_heap_not_locked();
     return result;
   }
 
-  assert_heap_locked();
-
-  // Second attempt: go into the even slower path where we might
-  // try to schedule a collection.
+  // Second attempt: Go to the slower path where we might try to
+  // schedule a collection.
   result = attempt_allocation_slow(word_size);
   if (result != NULL) {
     assert_heap_not_locked();
@@ -1004,6 +1152,7 @@
   }
 
   assert_heap_locked();
+  // Need to unlock the Heap_lock before returning.
   Heap_lock->unlock();
   return NULL;
 }
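Note the asymmetric contract here: both allocation attempts return with the Heap_lock released on success and held on failure, so only the final NULL path needs an explicit unlock. A compressed sketch of that contract with std::mutex (stub bodies; hypothetical names, not the HotSpot methods):

    #include <cstddef>
    #include <mutex>

    std::mutex heap_lock;

    // Stub fast path: the real one CASes into the current alloc
    // region; on failure it takes the lock and returns NULL with it
    // held, so the slow path can run under the lock.
    void* attempt_allocation(size_t /*bytes*/) {
      heap_lock.lock();
      return nullptr;
    }

    // Stub slow path: entered with heap_lock held; a real one would
    // release the lock on success and keep it held on failure.
    void* attempt_allocation_slow(size_t /*bytes*/) {
      return nullptr;
    }

    void* allocate(size_t bytes) {
      void* result = attempt_allocation(bytes);  // lock-free first try
      if (result != nullptr) return result;      // lock not held here

      result = attempt_allocation_slow(bytes);   // runs under the lock
      if (result != nullptr) return result;      // lock released already

      heap_lock.unlock();  // both attempts failed: unlock before return
      return nullptr;
    }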
@@ -1022,11 +1171,10 @@
   for (int try_count = 1; /* we'll return */; try_count += 1) {
     unsigned int gc_count_before;
     {
-      Heap_lock->lock();
-
       if (!isHumongous(word_size)) {
-        // First attempt: try allocating out of the current alloc
-        // region or after replacing the current alloc region.
+        // First attempt: Try allocating out of the current alloc region
+        // using a CAS. If that fails, take the Heap_lock and retry the
+        // allocation, potentially replacing the current alloc region.
         HeapWord* result = attempt_allocation(word_size);
         if (result != NULL) {
           assert_heap_not_locked();
@@ -1035,14 +1183,17 @@
 
         assert_heap_locked();
 
-        // Second attempt: go into the even slower path where we might
-        // try to schedule a collection.
+        // Second attempt: Go to the slower path where we might try to
+        // schedule a collection.
         result = attempt_allocation_slow(word_size);
         if (result != NULL) {
           assert_heap_not_locked();
           return result;
         }
       } else {
+        // attempt_allocation_humongous() requires the Heap_lock to be held.
+        Heap_lock->lock();
+
         HeapWord* result = attempt_allocation_humongous(word_size,
                                                      false /* at_safepoint */);
         if (result != NULL) {
@@ -1054,7 +1205,8 @@
       assert_heap_locked();
       // Read the gc count while the heap lock is held.
       gc_count_before = SharedHeap::heap()->total_collections();
-      // We cannot be at a safepoint, so it is safe to unlock the Heap_lock
+
+      // Release the Heap_lock before attempting the collection.
       Heap_lock->unlock();
     }
 
@@ -1092,22 +1244,18 @@
 }
 
 void G1CollectedHeap::abandon_cur_alloc_region() {
-  if (_cur_alloc_region != NULL) {
-    // We're finished with the _cur_alloc_region.
-    if (_cur_alloc_region->is_empty()) {
-      _free_regions++;
-      free_region(_cur_alloc_region);
-    } else {
-      // As we're builing (at least the young portion) of the collection
-      // set incrementally we'll add the current allocation region to
-      // the collection set here.
-      if (_cur_alloc_region->is_young()) {
-        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
-      }
-      _summary_bytes_used += _cur_alloc_region->used();
-    }
-    _cur_alloc_region = NULL;
-  }
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  HeapRegion* cur_alloc_region = _cur_alloc_region;
+  if (cur_alloc_region != NULL) {
+    assert(!cur_alloc_region->is_empty(),
+           "the current alloc region can never be empty");
+    assert(cur_alloc_region->is_young(),
+           "the current alloc region should be young");
+
+    retire_cur_alloc_region_common(cur_alloc_region);
+  }
+  assert(_cur_alloc_region == NULL, "post-condition");
 }
 
 void G1CollectedHeap::abandon_gc_alloc_regions() {
@@ -1188,18 +1336,20 @@
 bool G1CollectedHeap::do_collection(bool explicit_gc,
                                     bool clear_all_soft_refs,
                                     size_t word_size) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
   if (GC_locker::check_active_before_gc()) {
     return false;
   }
 
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
 
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
 
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
+  verify_region_sets_optional();
 
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                            collector_policy()->should_clear_all_soft_refs();
@@ -1222,6 +1372,9 @@
     double start = os::elapsedTime();
     g1_policy()->record_full_collection_start();
 
+    wait_while_free_regions_coming();
+    append_secondary_free_list_if_not_empty();
+
     gc_prologue(true);
     increment_total_collections(true /* full gc */);
 
@@ -1234,7 +1387,6 @@
       gclog_or_tty->print(" VerifyBeforeGC:");
       Universe::verify(true);
     }
-    assert(regions_accounted_for(), "Region leakage!");
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
@@ -1258,7 +1410,6 @@
     assert(_cur_alloc_region == NULL, "Invariant.");
     g1_rem_set()->cleanupHRRS();
     tear_down_region_lists();
-    set_used_regions_to_need_zero_fill();
 
     // We may have added regions to the current incremental collection
     // set between the last GC or pause and now. We need to clear the
@@ -1293,9 +1444,7 @@
       HandleMark hm;  // Discard invalid handles created during gc
       G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
     }
-    // Because freeing humongous regions may have added some unclean
-    // regions, it is necessary to tear down again before rebuilding.
-    tear_down_region_lists();
+    assert(free_regions() == 0, "we should not have added any free regions");
     rebuild_region_lists();
 
     _summary_bytes_used = recalculate_used();
@@ -1377,7 +1526,6 @@
     JavaThread::dirty_card_queue_set().abandon_logs();
     assert(!G1DeferredRSUpdate
            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
-    assert(regions_accounted_for(), "Region leakage!");
   }
 
   if (g1_policy()->in_young_gc_mode()) {
@@ -1389,7 +1537,9 @@
   }
 
   // Update the number of full collections that have been completed.
-  increment_full_collections_completed(false /* outer */);
+  increment_full_collections_completed(false /* concurrent */);
+
+  verify_region_sets_optional();
 
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
@@ -1531,10 +1681,7 @@
 HeapWord*
 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                            bool* succeeded) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "satisfy_failed_allocation() should only be called at a safepoint");
-  assert(Thread::current()->is_VM_thread(),
-         "satisfy_failed_allocation() should only be called by the VM thread");
+  assert_at_safepoint(true /* should_be_vm_thread */);
 
   *succeeded = true;
   // Let's attempt the allocation first.
@@ -1606,53 +1753,22 @@
 // allocated block, or else "NULL".
 
 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "expand_and_allocate() should only be called at a safepoint");
-  assert(Thread::current()->is_VM_thread(),
-         "expand_and_allocate() should only be called by the VM thread");
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  verify_region_sets_optional();
 
   size_t expand_bytes = word_size * HeapWordSize;
   if (expand_bytes < MinHeapDeltaBytes) {
     expand_bytes = MinHeapDeltaBytes;
   }
   expand(expand_bytes);
-  assert(regions_accounted_for(), "Region leakage!");
+
+  verify_region_sets_optional();
 
   return attempt_allocation_at_safepoint(word_size,
                                      false /* expect_null_cur_alloc_region */);
 }
 
-size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
-  size_t pre_used = 0;
-  size_t cleared_h_regions = 0;
-  size_t freed_regions = 0;
-  UncleanRegionList local_list;
-  free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
-                                    freed_regions, &local_list);
-
-  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
-                          &local_list);
-  return pre_used;
-}
-
-void
-G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
-                                                   size_t& pre_used,
-                                                   size_t& cleared_h,
-                                                   size_t& freed_regions,
-                                                   UncleanRegionList* list,
-                                                   bool par) {
-  assert(!hr->continuesHumongous(), "should have filtered these out");
-  size_t res = 0;
-  if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
-      !hr->is_young()) {
-    if (G1PolicyVerbose > 0)
-      gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
-                                                                               " during cleanup", hr, hr->used());
-    free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
-  }
-}
-
 // FIXME: both this and shrink could probably be more efficient by
 // doing one "VirtualSpace::expand_by" call rather than several.
 void G1CollectedHeap::expand(size_t expand_bytes) {
@@ -1685,19 +1801,7 @@
 
       // Add it to the HeapRegionSeq.
       _hrs->insert(hr);
-      // Set the zero-fill state, according to whether it's already
-      // zeroed.
-      {
-        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-        if (is_zeroed) {
-          hr->set_zero_fill_complete();
-          put_free_region_on_list_locked(hr);
-        } else {
-          hr->set_zero_fill_needed();
-          put_region_on_unclean_list_locked(hr);
-        }
-      }
-      _free_regions++;
+      _free_list.add_as_tail(hr);
       // And we used up an expansion region to create it.
       _expansion_regions--;
       // Tell the cardtable about it.
@@ -1706,6 +1810,7 @@
       _bot_shared->resize(_g1_committed.word_size());
     }
   }
+
   if (Verbose && PrintGC) {
     size_t new_mem_size = _g1_storage.committed_size();
     gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
@@ -1730,7 +1835,6 @@
   assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
 
   _g1_committed.set_end(mr.start());
-  _free_regions -= num_regions_deleted;
   _expansion_regions += num_regions_deleted;
 
   // Tell the cardtable about it.
@@ -1750,10 +1854,17 @@
 }
 
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
+  verify_region_sets_optional();
+
   release_gc_alloc_regions(true /* totally */);
+  // Instead of tearing down / rebuilding the free lists here, we
+  // could use the remove_all_pending() method on free_list to
+  // remove only the ones that we need to remove.
   tear_down_region_lists();  // We will rebuild them in a moment.
   shrink_helper(shrink_bytes);
   rebuild_region_lists();
+
+  verify_region_sets_optional();
 }
 
 // Public methods.
@@ -1768,21 +1879,21 @@
   _g1_policy(policy_),
   _dirty_card_queue_set(false),
   _into_cset_dirty_card_queue_set(false),
+  _is_alive_closure(this),
   _ref_processor(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
-  _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
   _evac_failure_scan_stack(NULL) ,
   _mark_in_progress(false),
-  _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
+  _cg1r(NULL), _summary_bytes_used(0),
   _cur_alloc_region(NULL),
   _refine_cte_cl(NULL),
-  _free_region_list(NULL), _free_region_list_size(0),
-  _free_regions(0),
   _full_collection(false),
-  _unclean_region_list(),
-  _unclean_regions_coming(false),
+  _free_list("Master Free List"),
+  _secondary_free_list("Secondary Free List"),
+  _humongous_set("Master Humongous Set"),
+  _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
@@ -1866,7 +1977,7 @@
 
   ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
                         HeapRegion::GrainBytes,
-                        false /*ism*/, addr);
+                        UseLargePages, addr);
 
   if (UseCompressedOops) {
     if (addr != NULL && !heap_rs.is_reserved()) {
@@ -1875,13 +1986,13 @@
       // Try again to reserve the heap higher.
       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
       ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
-                             false /*ism*/, addr);
+                             UseLargePages, addr);
       if (addr != NULL && !heap_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
         ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
-                               false /*ism*/, addr);
+                               UseLargePages, addr);
         heap_rs = heap_rs1;
       } else {
         heap_rs = heap_rs0;
@@ -1903,8 +2014,6 @@
 
   _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
 
-  _num_humongous_regions = 0;
-
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
@@ -1949,6 +2058,8 @@
   guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
             "too many cards per region");
 
+  HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
+
   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                              heap_word_size(init_byte_size));
 
@@ -1973,11 +2084,6 @@
   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
   _cmThread = _cm->cmThread();
 
-  // ...and the concurrent zero-fill thread, if necessary.
-  if (G1ConcZeroFill) {
-    _czft = new ConcurrentZFThread();
-  }
-
   // Initialize the from_card cache structure of HeapRegionRemSet.
   HeapRegionRemSet::init_heap(max_regions());
 
@@ -2061,7 +2167,8 @@
                                          mr,    // span
                                          false, // Reference discovery is not atomic
                                          true,  // mt_discovery
-                                         NULL,  // is alive closure: need to fill this in for efficiency
+                                         &_is_alive_closure, // is alive closure
+                                                             // for efficiency
                                          ParallelGCThreads,
                                          ParallelRefProcEnabled,
                                          true); // Setting next fields of discovered
@@ -2150,7 +2257,7 @@
 #endif // PRODUCT
 
 size_t G1CollectedHeap::unsafe_max_alloc() {
-  if (_free_regions > 0) return HeapRegion::GrainBytes;
+  if (free_regions() > 0) return HeapRegion::GrainBytes;
   // otherwise, is there space in the current allocation region?
 
   // We need to store the current allocation region in a local variable
@@ -2176,9 +2283,14 @@
      (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
 }
 
-void G1CollectedHeap::increment_full_collections_completed(bool outer) {
+void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 
+  // We assume that if concurrent == true, then the caller is a
+  // concurrent thread that has joined the Suspendible Thread
+  // Set. If there's ever a cheap way to check this, we should add an
+  // assert here.
+
   // We have already incremented _total_full_collections at the start
   // of the GC, so total_full_collections() represents how many full
   // collections have been started.
@@ -2192,17 +2304,18 @@
   // behind the number of full collections started.
 
   // This is the case for the inner caller, i.e. a Full GC.
-  assert(outer ||
+  assert(concurrent ||
          (full_collections_started == _full_collections_completed + 1) ||
          (full_collections_started == _full_collections_completed + 2),
-         err_msg("for inner caller: full_collections_started = %u "
+         err_msg("for inner caller (Full GC): full_collections_started = %u "
                  "is inconsistent with _full_collections_completed = %u",
                  full_collections_started, _full_collections_completed));
 
   // This is the case for the outer caller, i.e. the concurrent cycle.
-  assert(!outer ||
+  assert(!concurrent ||
          (full_collections_started == _full_collections_completed + 1),
-         err_msg("for outer caller: full_collections_started = %u "
+         err_msg("for outer caller (concurrent cycle): "
+                 "full_collections_started = %u "
                  "is inconsistent with _full_collections_completed = %u",
                  full_collections_started, _full_collections_completed));
 
@@ -2212,7 +2325,7 @@
   // we wake up any waiters (especially when ExplicitInvokesConcurrent
   // is set) so that if a waiter requests another System.gc() it doesn't
   // incorrectly see that a marking cycle is still in progress.
-  if (outer) {
+  if (concurrent) {
     _cmThread->clear_in_progress();
   }
 
@@ -2224,8 +2337,7 @@
 }
 
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
-  assert(Thread::current()->is_VM_thread(), "Precondition#1");
-  assert(Heap_lock->is_locked(), "Precondition#2");
+  assert_at_safepoint(true /* should_be_vm_thread */);
   GCCauseSetter gcs(this, cause);
   switch (cause) {
     case GCCause::_heap_inspection:
@@ -2248,12 +2360,6 @@
   {
     MutexLocker ml(Heap_lock);
 
-    // Don't want to do a GC until cleanup is completed. This
-    // limitation will be removed in the near future when the
-    // operation of the free region list is revamped as part of
-    // CR 6977804.
-    wait_for_cleanup_complete();
-
     // Read the GC count while holding the Heap_lock
     gc_count_before = SharedHeap::heap()->total_collections();
     full_gc_count_before = SharedHeap::heap()->total_full_collections();
@@ -2632,10 +2738,6 @@
   }
 }
 
-bool G1CollectedHeap::allocs_are_zero_filled() {
-  return false;
-}
-
 size_t G1CollectedHeap::large_typearray_limit() {
   // FIXME
   return HeapRegion::GrainBytes/HeapWordSize;
@@ -2650,7 +2752,6 @@
   return 0;
 }
 
-
 void G1CollectedHeap::prepare_for_verify() {
   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
     ensure_parsability(false);
@@ -2861,7 +2962,9 @@
                          &rootsCl);
     bool failures = rootsCl.failures();
     rem_set()->invalidate(perm_gen()->used_region(), false);
-    if (!silent) { gclog_or_tty->print("heapRegions "); }
+    if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
+    verify_region_sets();
+    if (!silent) { gclog_or_tty->print("HeapRegions "); }
     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
              "sanity check");
@@ -2889,7 +2992,7 @@
         failures = true;
       }
     }
-    if (!silent) gclog_or_tty->print("remset ");
+    if (!silent) gclog_or_tty->print("RemSet ");
     rem_set()->verify();
 
     if (failures) {
@@ -2960,15 +3063,10 @@
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->print_worker_threads_on(st);
   }
-
   _cmThread->print_on(st);
   st->cr();
-
   _cm->print_worker_threads_on(st);
-
   _cg1r->print_worker_threads_on(st);
-
-  _czft->print_on(st);
   st->cr();
 }
 
@@ -2978,7 +3076,6 @@
   }
   tc->do_thread(_cmThread);
   _cg1r->threads_do(tc);
-  tc->do_thread(_czft);
 }
 
 void G1CollectedHeap::print_tracing_info() const {
@@ -2994,15 +3091,10 @@
   if (G1SummarizeConcMark) {
     concurrent_mark()->print_summary_info();
   }
-  if (G1SummarizeZFStats) {
-    ConcurrentZFThread::print_summary_info();
-  }
   g1_policy()->print_yg_surv_rate_info();
-
   SpecializationStats::print();
 }
 
-
 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
   HeapRegion* hr = heap_region_containing(addr);
   if (hr == NULL) {
@@ -3201,17 +3293,23 @@
 
 bool
 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  guarantee(!is_gc_active(), "collection is not reentrant");
+
   if (GC_locker::check_active_before_gc()) {
     return false;
   }
 
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
+  ResourceMark rm;
+
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
 
+  verify_region_sets_optional();
+
   {
-    ResourceMark rm;
-
     // This call will decide whether this pause is an initial-mark
     // pause. If it is, during_initial_mark_pause() will return true
     // for the duration of this pause.
@@ -3241,10 +3339,16 @@
 
     TraceMemoryManagerStats tms(false /* fullGC */);
 
-    assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-    assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
-    guarantee(!is_gc_active(), "collection is not reentrant");
-    assert(regions_accounted_for(), "Region leakage!");
+    // If there are any free regions available on the secondary_free_list,
+    // make sure we append them to the free_list. However, we don't
+    // have to wait for the rest of the cleanup operation to
+    // finish. If it's still going on, that's OK. If we run out of
+    // regions, the region allocation code will check the
+    // secondary_free_list and potentially wait if more free regions
+    // are coming (see new_region_try_secondary_free_list()).
+    if (!G1StressConcRegionFreeing) {
+      append_secondary_free_list_if_not_empty();
+    }
 
     increment_gc_time_stamp();
 
@@ -3324,8 +3428,6 @@
       // progress, this will be zero.
       _cm->set_oops_do_bound();
 
-      assert(regions_accounted_for(), "Region leakage.");
-
       if (mark_in_progress())
         concurrent_mark()->newCSet();
 
@@ -3421,8 +3523,6 @@
       g1_policy()->record_pause_time_ms(pause_time_ms);
       g1_policy()->record_collection_pause_end();
 
-      assert(regions_accounted_for(), "Region leakage.");
-
       MemoryService::track_memory_usage();
 
       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
@@ -3453,8 +3553,6 @@
       gc_epilogue(false);
     }
 
-    assert(verify_region_lists(), "Bad region lists.");
-
     if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
       gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
       print_tracing_info();
@@ -3462,6 +3560,8 @@
     }
   }
 
+  verify_region_sets_optional();
+
   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 
@@ -3568,7 +3668,7 @@
 
 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
   assert(Thread::current()->is_VM_thread() ||
-         par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
+         FreeList_lock->owned_by_self(), "Precondition");
   assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
          "Precondition.");
   hr->set_is_gc_alloc_region(true);
@@ -3590,7 +3690,7 @@
 #endif // G1_DEBUG
 
 void G1CollectedHeap::forget_alloc_region_list() {
-  assert(Thread::current()->is_VM_thread(), "Precondition");
+  assert_at_safepoint(true /* should_be_vm_thread */);
   while (_gc_alloc_region_list != NULL) {
     HeapRegion* r = _gc_alloc_region_list;
     assert(r->is_gc_alloc_region(), "Invariant.");
@@ -3610,9 +3710,6 @@
         _young_list->add_survivor_region(r);
       }
     }
-    if (r->is_empty()) {
-      ++_free_regions;
-    }
   }
 #ifdef G1_DEBUG
   FindGCAllocRegion fa;
@@ -3665,7 +3762,7 @@
 
     if (alloc_region == NULL) {
       // we will get a new GC alloc region
-      alloc_region = newAllocRegionWithExpansion(ap, 0);
+      alloc_region = new_gc_alloc_region(ap, 0);
     } else {
       // the region was retained from the last collection
       ++_gc_alloc_region_counts[ap];
@@ -3720,11 +3817,9 @@
       set_gc_alloc_region(ap, NULL);
 
       if (r->is_empty()) {
-        // we didn't actually allocate anything in it; let's just put
-        // it on the free list
-        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-        r->set_zero_fill_complete();
-        put_free_region_on_list_locked(r);
+        // We didn't actually allocate anything in it; let's just put
+        // it back on the free list.
+        _free_list.add_as_tail(r);
       } else if (_retain_gc_alloc_region[ap] && !totally) {
         // retain it so that we can use it at the beginning of the next GC
         _retained_gc_alloc_regions[ap] = r;
@@ -3846,13 +3941,15 @@
   size_t _next_marked_bytes;
   OopsInHeapRegionClosure *_cl;
 public:
-  RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
-    _g1(g1), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
+  RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr,
+                           OopsInHeapRegionClosure* cl) :
+    _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
     _next_marked_bytes(0), _cl(cl) {}
 
   size_t prev_marked_bytes() { return _prev_marked_bytes; }
   size_t next_marked_bytes() { return _next_marked_bytes; }
 
+  // <original comment>
   // The original idea here was to coalesce evacuated and dead objects.
   // However that caused complications with the block offset table (BOT).
   // In particular if there were two TLABs, one of them partially refined.
@@ -3861,15 +3958,24 @@
   // of TLAB_2. If the last object of the TLAB_1 and the first object
   // of TLAB_2 are coalesced, then the cards of the unrefined part
   // would point into middle of the filler object.
+  // The current approach is to not coalesce and leave the BOT contents intact.
+  // </original comment>
   //
-  // The current approach is to not coalesce and leave the BOT contents intact.
+  // We now reset the BOT when we start the object iteration over the
+  // region and refine its entries for every object we come across. So
+  // the above comment is no longer relevant, and we should be able
+  // to coalesce dead objects if we want to.
   void do_object(oop obj) {
+    HeapWord* obj_addr = (HeapWord*) obj;
+    assert(_hr->is_in(obj_addr), "sanity");
+    size_t obj_size = obj->size();
+    _hr->update_bot_for_object(obj_addr, obj_size);
     if (obj->is_forwarded() && obj->forwardee() == obj) {
       // The object failed to move.
       assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
       _cm->markPrev(obj);
       assert(_cm->isPrevMarked(obj), "Should be marked!");
-      _prev_marked_bytes += (obj->size() * HeapWordSize);
+      _prev_marked_bytes += (obj_size * HeapWordSize);
       if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
         _cm->markAndGrayObjectIfNecessary(obj);
       }
@@ -3891,7 +3997,7 @@
     } else {
       // The object has been either evacuated or is dead. Fill it with a
       // dummy object.
-      MemRegion mr((HeapWord*)obj, obj->size());
+      MemRegion mr((HeapWord*)obj, obj_size);
       CollectedHeap::fill_with_object(mr);
       _cm->clearRangeBothMaps(mr);
     }
@@ -3911,10 +4017,13 @@
   HeapRegion* cur = g1_policy()->collection_set();
   while (cur != NULL) {
     assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
-
-    RemoveSelfPointerClosure rspc(_g1h, cl);
+    assert(!cur->isHumongous(), "sanity");
+
     if (cur->evacuation_failed()) {
       assert(cur->in_collection_set(), "bad CS");
+      RemoveSelfPointerClosure rspc(_g1h, cur, cl);
+
+      cur->reset_bot();
       cl->set_region(cur);
       cur->object_iterate(&rspc);
 
@@ -3950,8 +4059,6 @@
   // Now restore saved marks, if any.
   if (_objs_with_preserved_marks != NULL) {
     assert(_preserved_marks_of_objs != NULL, "Both or none.");
-    assert(_objs_with_preserved_marks->length() ==
-           _preserved_marks_of_objs->length(), "Both or none.");
     guarantee(_objs_with_preserved_marks->length() ==
               _preserved_marks_of_objs->length(), "Both or none.");
     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
@@ -3981,15 +4088,6 @@
   }
 }
 
-void G1CollectedHeap::handle_evacuation_failure(oop old) {
-  markOop m = old->mark();
-  // forward to self
-  assert(!old->is_forwarded(), "precondition");
-
-  old->forward_to(old);
-  handle_evacuation_failure_common(old, m);
-}
-
 oop
 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
                                                oop old) {
@@ -4046,7 +4144,10 @@
 }
 
 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
-  if (m != markOopDesc::prototype()) {
+  assert(evacuation_failed(), "Oversaving!");
+  // We want to call the "for_promotion_failure" version only in the
+  // case of a promotion failure.
+  if (m->must_be_preserved_for_promotion_failure(obj)) {
     if (_objs_with_preserved_marks == NULL) {
       assert(_preserved_marks_of_objs == NULL, "Both or none.");
       _objs_with_preserved_marks =
@@ -4073,8 +4174,6 @@
 
   HeapWord* block = alloc_region->par_allocate(word_size);
   if (block == NULL) {
-    MutexLockerEx x(par_alloc_during_gc_lock(),
-                    Mutex::_no_safepoint_check_flag);
     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
   }
   return block;
@@ -4103,6 +4202,12 @@
          err_msg("we should not be seeing humongous allocation requests "
                  "during GC, word_size = "SIZE_FORMAT, word_size));
 
+  // We need to make sure we serialize calls to this method. Given
+  // that the FreeList_lock guards accesses to the free_list anyway,
+  // and we need to potentially remove a region from it, we'll use it
+  // to protect the whole call.
+  MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+
   HeapWord* block = NULL;
   // In the parallel case, a previous thread to obtain the lock may have
   // already assigned a new gc_alloc_region.
@@ -4148,7 +4253,7 @@
   }
 
   // Now allocate a new region for allocation.
-  alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
+  alloc_region = new_gc_alloc_region(purpose, word_size);
 
   // let the caller handle alloc failure
   if (alloc_region != NULL) {
@@ -4156,9 +4261,6 @@
     assert(check_gc_alloc_regions(), "alloc regions messed up");
     assert(alloc_region->saved_mark_at_top(),
            "Mark should have been saved already.");
-    // We used to assert that the region was zero-filled here, but no
-    // longer.
-
     // This must be done last: once it's installed, other regions may
     // allocate in it (without holding the lock.)
     set_gc_alloc_region(purpose, alloc_region);
@@ -4823,91 +4925,91 @@
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 }
 
-void G1CollectedHeap::free_region(HeapRegion* hr) {
-  size_t pre_used = 0;
-  size_t cleared_h_regions = 0;
-  size_t freed_regions = 0;
-  UncleanRegionList local_list;
-
-  HeapWord* start = hr->bottom();
-  HeapWord* end   = hr->prev_top_at_mark_start();
-  size_t used_bytes = hr->used();
-  size_t live_bytes = hr->max_live_bytes();
-  if (used_bytes > 0) {
-    guarantee( live_bytes <= used_bytes, "invariant" );
-  } else {
-    guarantee( live_bytes == 0, "invariant" );
-  }
-
-  size_t garbage_bytes = used_bytes - live_bytes;
-  if (garbage_bytes > 0)
-    g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
-
-  free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
-                   &local_list);
-  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
-                          &local_list);
-}
-
-void
-G1CollectedHeap::free_region_work(HeapRegion* hr,
-                                  size_t& pre_used,
-                                  size_t& cleared_h_regions,
-                                  size_t& freed_regions,
-                                  UncleanRegionList* list,
-                                  bool par) {
-  pre_used += hr->used();
-  if (hr->isHumongous()) {
-    assert(hr->startsHumongous(),
-           "Only the start of a humongous region should be freed.");
-    int ind = _hrs->find(hr);
-    assert(ind != -1, "Should have an index.");
-    // Clear the start region.
-    hr->hr_clear(par, true /*clear_space*/);
-    list->insert_before_head(hr);
-    cleared_h_regions++;
-    freed_regions++;
-    // Clear any continued regions.
-    ind++;
-    while ((size_t)ind < n_regions()) {
-      HeapRegion* hrc = _hrs->at(ind);
-      if (!hrc->continuesHumongous()) break;
-      // Otherwise, does continue the H region.
-      assert(hrc->humongous_start_region() == hr, "Huh?");
-      hrc->hr_clear(par, true /*clear_space*/);
-      cleared_h_regions++;
-      freed_regions++;
-      list->insert_before_head(hrc);
-      ind++;
+void G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr,
+                                     size_t* pre_used,
+                                     FreeRegionList* free_list,
+                                     HumongousRegionSet* humongous_proxy_set,
+                                     bool par) {
+  if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
+    if (hr->isHumongous()) {
+      assert(hr->startsHumongous(), "we should only see starts humongous");
+      free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
+    } else {
+      free_region(hr, pre_used, free_list, par);
     }
-  } else {
-    hr->hr_clear(par, true /*clear_space*/);
-    list->insert_before_head(hr);
-    freed_regions++;
-    // If we're using clear2, this should not be enabled.
-    // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
   }
 }
 
-void G1CollectedHeap::finish_free_region_work(size_t pre_used,
-                                              size_t cleared_h_regions,
-                                              size_t freed_regions,
-                                              UncleanRegionList* list) {
-  if (list != NULL && list->sz() > 0) {
-    prepend_region_list_on_unclean_list(list);
-  }
-  // Acquire a lock, if we're parallel, to update possibly-shared
-  // variables.
-  Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
-  {
+void G1CollectedHeap::free_region(HeapRegion* hr,
+                                  size_t* pre_used,
+                                  FreeRegionList* free_list,
+                                  bool par) {
+  assert(!hr->isHumongous(), "this is only for non-humongous regions");
+  assert(!hr->is_empty(), "the region should not be empty");
+  assert(free_list != NULL, "pre-condition");
+
+  *pre_used += hr->used();
+  hr->hr_clear(par, true /* clear_space */);
+  free_list->add_as_tail(hr);
+}
+
+void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
+                                     size_t* pre_used,
+                                     FreeRegionList* free_list,
+                                     HumongousRegionSet* humongous_proxy_set,
+                                     bool par) {
+  assert(hr->startsHumongous(), "this is only for starts humongous regions");
+  assert(free_list != NULL, "pre-condition");
+  assert(humongous_proxy_set != NULL, "pre-condition");
+
+  size_t hr_used = hr->used();
+  size_t hr_capacity = hr->capacity();
+  size_t hr_pre_used = 0;
+  _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
+  hr->set_notHumongous();
+  free_region(hr, &hr_pre_used, free_list, par);
+
+  int i = hr->hrs_index() + 1;
+  size_t num = 1;
+  while ((size_t) i < n_regions()) {
+    HeapRegion* curr_hr = _hrs->at(i);
+    if (!curr_hr->continuesHumongous()) {
+      break;
+    }
+    curr_hr->set_notHumongous();
+    free_region(curr_hr, &hr_pre_used, free_list, par);
+    num += 1;
+    i += 1;
+  }
+  assert(hr_pre_used == hr_used,
+         err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
+                 "should be the same", hr_pre_used, hr_used));
+  *pre_used += hr_pre_used;
+}
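free_humongous_region() above frees the starts-humongous head and then walks forward over every continues-humongous region in the sequence. The shape of that walk, over a flat region array (hypothetical types, not HeapRegionSeq):

    #include <cstddef>
    #include <vector>

    enum RegionKind { Normal, StartsHumongous, ContinuesHumongous };
    struct Region { RegionKind kind; size_t used; };

    // Frees one humongous object's regions: the head plus every
    // trailing continues-humongous region, returning bytes reclaimed.
    size_t free_humongous(std::vector<Region>& regions, size_t head) {
      size_t freed = regions[head].used;
      regions[head].kind = Normal;      // like set_notHumongous()
      regions[head].used = 0;

      for (size_t i = head + 1;
           i < regions.size() && regions[i].kind == ContinuesHumongous;
           i++) {
        freed += regions[i].used;
        regions[i].kind = Normal;
        regions[i].used = 0;
      }
      return freed;
    }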
+
+void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
+                                       FreeRegionList* free_list,
+                                       HumongousRegionSet* humongous_proxy_set,
+                                       bool par) {
+  if (pre_used > 0) {
+    Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
+    assert(_summary_bytes_used >= pre_used,
+           err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
+                   "should be >= pre_used: "SIZE_FORMAT,
+                   _summary_bytes_used, pre_used));
     _summary_bytes_used -= pre_used;
-    _num_humongous_regions -= (int) cleared_h_regions;
-    _free_regions += freed_regions;
+  }
+  if (free_list != NULL && !free_list->is_empty()) {
+    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+    _free_list.add_as_tail(free_list);
+  }
+  if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
+    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
+    _humongous_set.update_from_proxy(humongous_proxy_set);
   }
 }
 
-
 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
   while (list != NULL) {
     guarantee( list->is_young(), "invariant" );
@@ -5030,6 +5132,9 @@
 }
 
 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
+  size_t pre_used = 0;
+  FreeRegionList local_free_list("Local List for CSet Freeing");
+
   double young_time_ms     = 0.0;
   double non_young_time_ms = 0.0;
 
@@ -5048,6 +5153,8 @@
   size_t rs_lengths = 0;
 
   while (cur != NULL) {
+    assert(!is_on_free_list(cur), "sanity");
+
     if (non_young) {
       if (cur->is_young()) {
         double end_sec = os::elapsedTime();
@@ -5058,14 +5165,12 @@
         non_young = false;
       }
     } else {
-      if (!cur->is_on_free_list()) {
-        double end_sec = os::elapsedTime();
-        double elapsed_ms = (end_sec - start_sec) * 1000.0;
-        young_time_ms += elapsed_ms;
-
-        start_sec = os::elapsedTime();
-        non_young = true;
-      }
+      double end_sec = os::elapsedTime();
+      double elapsed_ms = (end_sec - start_sec) * 1000.0;
+      young_time_ms += elapsed_ms;
+
+      start_sec = os::elapsedTime();
+      non_young = true;
     }
 
     rs_lengths += cur->rem_set()->occupied();
@@ -5098,9 +5203,8 @@
 
     if (!cur->evacuation_failed()) {
       // And the region is empty.
-      assert(!cur->is_empty(),
-             "Should not have empty regions in a CS.");
-      free_region(cur);
+      assert(!cur->is_empty(), "Should not have empty regions in a CS.");
+      free_region(cur, &pre_used, &local_free_list, false /* par */);
     } else {
       cur->uninstall_surv_rate_group();
       if (cur->is_young())
@@ -5121,6 +5225,9 @@
   else
     young_time_ms += elapsed_ms;
 
+  update_sets_after_freeing_regions(pre_used, &local_free_list,
+                                    NULL /* humongous_proxy_set */,
+                                    false /* par */);
   policy->record_young_free_cset_time_ms(young_time_ms);
   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
 }
@@ -5146,291 +5253,53 @@
   }
 }
 
-HeapRegion*
-G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
-  assert(ZF_mon->owned_by_self(), "Precondition");
-  HeapRegion* res = pop_unclean_region_list_locked();
-  if (res != NULL) {
-    assert(!res->continuesHumongous() &&
-           res->zero_fill_state() != HeapRegion::Allocated,
-           "Only free regions on unclean list.");
-    if (zero_filled) {
-      res->ensure_zero_filled_locked();
-      res->set_zero_fill_allocated();
-    }
-  }
-  return res;
-}
-
-HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
-  MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
-  return alloc_region_from_unclean_list_locked(zero_filled);
+void G1CollectedHeap::set_free_regions_coming() {
+  if (G1ConcRegionFreeingVerbose) {
+    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
+                           "setting free regions coming");
+  }
+
+  assert(!free_regions_coming(), "pre-condition");
+  _free_regions_coming = true;
 }
 
-void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-  put_region_on_unclean_list_locked(r);
-  if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
-}
-
-void G1CollectedHeap::set_unclean_regions_coming(bool b) {
-  MutexLockerEx x(Cleanup_mon);
-  set_unclean_regions_coming_locked(b);
-}
-
-void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
-  assert(Cleanup_mon->owned_by_self(), "Precondition");
-  _unclean_regions_coming = b;
-  // Wake up mutator threads that might be waiting for completeCleanup to
-  // finish.
-  if (!b) Cleanup_mon->notify_all();
-}
-
-void G1CollectedHeap::wait_for_cleanup_complete() {
-  assert_not_at_safepoint();
-  MutexLockerEx x(Cleanup_mon);
-  wait_for_cleanup_complete_locked();
-}
-
-void G1CollectedHeap::wait_for_cleanup_complete_locked() {
-  assert(Cleanup_mon->owned_by_self(), "precondition");
-  while (_unclean_regions_coming) {
-    Cleanup_mon->wait();
+void G1CollectedHeap::reset_free_regions_coming() {
+  {
+    assert(free_regions_coming(), "pre-condition");
+    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+    _free_regions_coming = false;
+    SecondaryFreeList_lock->notify_all();
+  }
+
+  if (G1ConcRegionFreeingVerbose) {
+    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
+                           "reset free regions coming");
   }
 }
 
-void
-G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
-  assert(ZF_mon->owned_by_self(), "precondition.");
-#ifdef ASSERT
-  if (r->is_gc_alloc_region()) {
-    ResourceMark rm;
-    stringStream region_str;
-    print_on(&region_str);
-    assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s",
-                                             region_str.as_string()));
-  }
-#endif
-  _unclean_region_list.insert_before_head(r);
-}
-
-void
-G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-  prepend_region_list_on_unclean_list_locked(list);
-  if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
-}
-
-void
-G1CollectedHeap::
-prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
-  assert(ZF_mon->owned_by_self(), "precondition.");
-  _unclean_region_list.prepend_list(list);
-}
-
-HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
-  assert(ZF_mon->owned_by_self(), "precondition.");
-  HeapRegion* res = _unclean_region_list.pop();
-  if (res != NULL) {
-    // Inform ZF thread that there's a new unclean head.
-    if (_unclean_region_list.hd() != NULL && should_zf())
-      ZF_mon->notify_all();
-  }
-  return res;
-}
-
-HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
-  assert(ZF_mon->owned_by_self(), "precondition.");
-  return _unclean_region_list.hd();
-}
-
-
-bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
-  assert(ZF_mon->owned_by_self(), "Precondition");
-  HeapRegion* r = peek_unclean_region_list_locked();
-  if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
-    // Result of below must be equal to "r", since we hold the lock.
-    (void)pop_unclean_region_list_locked();
-    put_free_region_on_list_locked(r);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-bool G1CollectedHeap::move_cleaned_region_to_free_list() {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-  return move_cleaned_region_to_free_list_locked();
-}
-
-
-void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
-  assert(ZF_mon->owned_by_self(), "precondition.");
-  assert(_free_region_list_size == free_region_list_length(), "Inv");
-  assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
-        "Regions on free list must be zero filled");
-  assert(!r->isHumongous(), "Must not be humongous.");
-  assert(r->is_empty(), "Better be empty");
-  assert(!r->is_on_free_list(),
-         "Better not already be on free list");
-  assert(!r->is_on_unclean_list(),
-         "Better not already be on unclean list");
-  r->set_on_free_list(true);
-  r->set_next_on_free_list(_free_region_list);
-  _free_region_list = r;
-  _free_region_list_size++;
-  assert(_free_region_list_size == free_region_list_length(), "Inv");
-}
-
-void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-  put_free_region_on_list_locked(r);
-}
-
-HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
-  assert(ZF_mon->owned_by_self(), "precondition.");
-  assert(_free_region_list_size == free_region_list_length(), "Inv");
-  HeapRegion* res = _free_region_list;
-  if (res != NULL) {
-    _free_region_list = res->next_from_free_list();
-    _free_region_list_size--;
-    res->set_on_free_list(false);
-    res->set_next_on_free_list(NULL);
-    assert(_free_region_list_size == free_region_list_length(), "Inv");
-  }
-  return res;
-}
-
-
-HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
-  // By self, or on behalf of self.
-  assert(Heap_lock->is_locked(), "Precondition");
-  HeapRegion* res = NULL;
-  bool first = true;
-  while (res == NULL) {
-    if (zero_filled || !first) {
-      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-      res = pop_free_region_list_locked();
-      if (res != NULL) {
-        assert(!res->zero_fill_is_allocated(),
-               "No allocated regions on free list.");
-        res->set_zero_fill_allocated();
-      } else if (!first) {
-        break;  // We tried both, time to return NULL.
-      }
-    }
-
-    if (res == NULL) {
-      res = alloc_region_from_unclean_list(zero_filled);
-    }
-    assert(res == NULL ||
-           !zero_filled ||
-           res->zero_fill_is_allocated(),
-           "We must have allocated the region we're returning");
-    first = false;
-  }
-  return res;
-}
-
-void G1CollectedHeap::remove_allocated_regions_from_lists() {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-  {
-    HeapRegion* prev = NULL;
-    HeapRegion* cur = _unclean_region_list.hd();
-    while (cur != NULL) {
-      HeapRegion* next = cur->next_from_unclean_list();
-      if (cur->zero_fill_is_allocated()) {
-        // Remove from the list.
-        if (prev == NULL) {
-          (void)_unclean_region_list.pop();
-        } else {
-          _unclean_region_list.delete_after(prev);
-        }
-        cur->set_on_unclean_list(false);
-        cur->set_next_on_unclean_list(NULL);
-      } else {
-        prev = cur;
-      }
-      cur = next;
-    }
-    assert(_unclean_region_list.sz() == unclean_region_list_length(),
-           "Inv");
+void G1CollectedHeap::wait_while_free_regions_coming() {
+  // Most of the time we won't have to wait, so let's do a quick test
+  // before we take the lock.
+  if (!free_regions_coming()) {
+    return;
+  }
+
+  if (G1ConcRegionFreeingVerbose) {
+    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
+                           "waiting for free regions");
   }
 
   {
-    HeapRegion* prev = NULL;
-    HeapRegion* cur = _free_region_list;
-    while (cur != NULL) {
-      HeapRegion* next = cur->next_from_free_list();
-      if (cur->zero_fill_is_allocated()) {
-        // Remove from the list.
-        if (prev == NULL) {
-          _free_region_list = cur->next_from_free_list();
-        } else {
-          prev->set_next_on_free_list(cur->next_from_free_list());
-        }
-        cur->set_on_free_list(false);
-        cur->set_next_on_free_list(NULL);
-        _free_region_list_size--;
-      } else {
-        prev = cur;
-      }
-      cur = next;
+    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+    while (free_regions_coming()) {
+      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
     }
-    assert(_free_region_list_size == free_region_list_length(), "Inv");
-  }
-}
-
-bool G1CollectedHeap::verify_region_lists() {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-  return verify_region_lists_locked();
-}
-
-bool G1CollectedHeap::verify_region_lists_locked() {
-  HeapRegion* unclean = _unclean_region_list.hd();
-  while (unclean != NULL) {
-    guarantee(unclean->is_on_unclean_list(), "Well, it is!");
-    guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
-    guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
-              "Everything else is possible.");
-    unclean = unclean->next_from_unclean_list();
-  }
-  guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
-
-  HeapRegion* free_r = _free_region_list;
-  while (free_r != NULL) {
-    assert(free_r->is_on_free_list(), "Well, it is!");
-    assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
-    switch (free_r->zero_fill_state()) {
-    case HeapRegion::NotZeroFilled:
-    case HeapRegion::ZeroFilling:
-      guarantee(false, "Should not be on free list.");
-      break;
-    default:
-      // Everything else is possible.
-      break;
-    }
-    free_r = free_r->next_from_free_list();
-  }
-  guarantee(_free_region_list_size == free_region_list_length(), "Inv");
-  // If we didn't do an assertion...
-  return true;
-}
-
-size_t G1CollectedHeap::free_region_list_length() {
-  assert(ZF_mon->owned_by_self(), "precondition.");
-  size_t len = 0;
-  HeapRegion* cur = _free_region_list;
-  while (cur != NULL) {
-    len++;
-    cur = cur->next_from_free_list();
-  }
-  return len;
-}
-
-size_t G1CollectedHeap::unclean_region_list_length() {
-  assert(ZF_mon->owned_by_self(), "precondition.");
-  return _unclean_region_list.length();
+  }
+
+  if (G1ConcRegionFreeingVerbose) {
+    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
+                           "done waiting for free regions");
+  }
 }
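
wait_while_free_regions_coming() above is a test / lock / re-test wait: the cheap unsynchronized read of the flag skips SecondaryFreeList_lock in the common case, and the flag is re-checked under the lock before sleeping. A minimal portable sketch of the same protocol, using standard C++ primitives and hypothetical names rather than HotSpot's Mutex/MutexLockerEx API:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // Hypothetical standalone analogue of the _free_regions_coming protocol.
    class FreeRegionsGate {
      std::mutex              _lock;
      std::condition_variable _cv;
      std::atomic<bool>       _coming;     // plays the role of _free_regions_coming
    public:
      FreeRegionsGate() : _coming(false) { }

      void set() { _coming.store(true); }  // like set_free_regions_coming()

      void reset() {                       // like reset_free_regions_coming()
        _coming.store(false);
        std::lock_guard<std::mutex> g(_lock);
        _cv.notify_all();                  // wake anyone blocked in wait_while_coming()
      }

      void wait_while_coming() {
        if (!_coming.load()) return;       // quick test first, no locking
        std::unique_lock<std::mutex> g(_lock);
        _cv.wait(g, [this] { return !_coming.load(); });  // re-test under the lock
      }
    };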
 
 size_t G1CollectedHeap::n_regions() {
@@ -5443,55 +5312,6 @@
     HeapRegion::GrainBytes;
 }
 
-size_t G1CollectedHeap::free_regions() {
-  /* Possibly-expensive assert.
-  assert(_free_regions == count_free_regions(),
-         "_free_regions is off.");
-  */
-  return _free_regions;
-}
-
-bool G1CollectedHeap::should_zf() {
-  return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
-}
-
-class RegionCounter: public HeapRegionClosure {
-  size_t _n;
-public:
-  RegionCounter() : _n(0) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->is_empty()) {
-      assert(!r->isHumongous(), "H regions should not be empty.");
-      _n++;
-    }
-    return false;
-  }
-  int res() { return (int) _n; }
-};
-
-size_t G1CollectedHeap::count_free_regions() {
-  RegionCounter rc;
-  heap_region_iterate(&rc);
-  size_t n = rc.res();
-  if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
-    n--;
-  return n;
-}
-
-size_t G1CollectedHeap::count_free_regions_list() {
-  size_t n = 0;
-  size_t o = 0;
-  ZF_mon->lock_without_safepoint_check();
-  HeapRegion* cur = _free_region_list;
-  while (cur != NULL) {
-    cur = cur->next_from_free_list();
-    n++;
-  }
-  size_t m = unclean_region_list_length();
-  ZF_mon->unlock();
-  return n + m;
-}
-
 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
   assert(heap_lock_held_for_gc(),
               "the heap lock should already be held by or for this thread");
@@ -5563,28 +5383,19 @@
   }
 }
 
-
 // Done at the start of full GC.
 void G1CollectedHeap::tear_down_region_lists() {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-  while (pop_unclean_region_list_locked() != NULL) ;
-  assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
-         "Postconditions of loop.");
-  while (pop_free_region_list_locked() != NULL) ;
-  assert(_free_region_list == NULL, "Postcondition of loop.");
-  if (_free_region_list_size != 0) {
-    gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
-    print_on(gclog_or_tty, true /* extended */);
-  }
-  assert(_free_region_list_size == 0, "Postconditions of loop.");
+  _free_list.remove_all();
 }
 
-
 class RegionResetter: public HeapRegionClosure {
-  G1CollectedHeap* _g1;
-  int _n;
+  G1CollectedHeap* _g1h;
+  FreeRegionList _local_free_list;
+
 public:
-  RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
+  RegionResetter() : _g1h(G1CollectedHeap::heap()),
+                     _local_free_list("Local Free List for RegionResetter") { }
+
   bool doHeapRegion(HeapRegion* r) {
     if (r->continuesHumongous()) return false;
     if (r->top() > r->bottom()) {
@@ -5592,152 +5403,32 @@
         Copy::fill_to_words(r->top(),
                           pointer_delta(r->end(), r->top()));
       }
-      r->set_zero_fill_allocated();
     } else {
       assert(r->is_empty(), "tautology");
-      _n++;
-      switch (r->zero_fill_state()) {
-        case HeapRegion::NotZeroFilled:
-        case HeapRegion::ZeroFilling:
-          _g1->put_region_on_unclean_list_locked(r);
-          break;
-        case HeapRegion::Allocated:
-          r->set_zero_fill_complete();
-          // no break; go on to put on free list.
-        case HeapRegion::ZeroFilled:
-          _g1->put_free_region_on_list_locked(r);
-          break;
-      }
+      _local_free_list.add_as_tail(r);
     }
     return false;
   }
 
-  int getFreeRegionCount() {return _n;}
+  void update_free_lists() {
+    _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL,
+                                            false /* par */);
+  }
 };
 
 // Done at the end of full GC.
 void G1CollectedHeap::rebuild_region_lists() {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   // This needs to go at the end of the full GC.
   RegionResetter rs;
   heap_region_iterate(&rs);
-  _free_regions = rs.getFreeRegionCount();
-  // Tell the ZF thread it may have work to do.
-  if (should_zf()) ZF_mon->notify_all();
-}
-
-class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
-  G1CollectedHeap* _g1;
-  int _n;
-public:
-  UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous()) return false;
-    if (r->top() > r->bottom()) {
-      // There are assertions in "set_zero_fill_needed()" below that
-      // require top() == bottom(), so this is technically illegal.
-      // We'll skirt the law here, by making that true temporarily.
-      DEBUG_ONLY(HeapWord* save_top = r->top();
-                 r->set_top(r->bottom()));
-      r->set_zero_fill_needed();
-      DEBUG_ONLY(r->set_top(save_top));
-    }
-    return false;
-  }
-};
-
-// Done at the start of full GC.
-void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-  // This needs to go at the end of the full GC.
-  UsedRegionsNeedZeroFillSetter rs;
-  heap_region_iterate(&rs);
+  rs.update_free_lists();
 }
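
RegionResetter above follows the batching idiom this changeset introduces throughout: freed regions accumulate on a local FreeRegionList, and the shared structures are updated once, at the end, via update_sets_after_freeing_regions(), instead of under a lock per region. A generic sketch of that accumulate-then-publish pattern (hypothetical standalone code, not the HotSpot types):

    #include <mutex>
    #include <vector>

    static std::mutex       g_master_lock;
    static std::vector<int> g_master_free_list;  // stands in for the master free list

    // All per-region work happens on a local list; the shared list is
    // touched exactly once per batch, under a single lock acquisition.
    void publish_freed_regions(const std::vector<int>& freed) {
      std::vector<int> local;
      for (size_t i = 0; i < freed.size(); ++i) {
        local.push_back(freed[i]);               // per-region work, lock-free
      }
      std::lock_guard<std::mutex> g(g_master_lock);
      g_master_free_list.insert(g_master_free_list.end(),
                                local.begin(), local.end());
    }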
 
 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
   _refine_cte_cl->set_concurrent(concurrent);
 }
 
-#ifndef PRODUCT
-
-class PrintHeapRegionClosure: public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion *r) {
-    gclog_or_tty->print("Region: "PTR_FORMAT":", r);
-    if (r != NULL) {
-      if (r->is_on_free_list())
-        gclog_or_tty->print("Free ");
-      if (r->is_young())
-        gclog_or_tty->print("Young ");
-      if (r->isHumongous())
-        gclog_or_tty->print("Is Humongous ");
-      r->print();
-    }
-    return false;
-  }
-};
-
-class SortHeapRegionClosure : public HeapRegionClosure {
-  size_t young_regions,free_regions, unclean_regions;
-  size_t hum_regions, count;
-  size_t unaccounted, cur_unclean, cur_alloc;
-  size_t total_free;
-  HeapRegion* cur;
-public:
-  SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0),
-    free_regions(0), unclean_regions(0),
-    hum_regions(0),
-    count(0), unaccounted(0),
-    cur_alloc(0), total_free(0)
-  {}
-  bool doHeapRegion(HeapRegion *r) {
-    count++;
-    if (r->is_on_free_list()) free_regions++;
-    else if (r->is_on_unclean_list()) unclean_regions++;
-    else if (r->isHumongous())  hum_regions++;
-    else if (r->is_young()) young_regions++;
-    else if (r == cur) cur_alloc++;
-    else unaccounted++;
-    return false;
-  }
-  void print() {
-    total_free = free_regions + unclean_regions;
-    gclog_or_tty->print("%d regions\n", count);
-    gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
-                        total_free, free_regions, unclean_regions);
-    gclog_or_tty->print("%d humongous %d young\n",
-                        hum_regions, young_regions);
-    gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
-    gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
-  }
-};
-
-void G1CollectedHeap::print_region_counts() {
-  SortHeapRegionClosure sc(_cur_alloc_region);
-  PrintHeapRegionClosure cl;
-  heap_region_iterate(&cl);
-  heap_region_iterate(&sc);
-  sc.print();
-  print_region_accounting_info();
-};
-
-bool G1CollectedHeap::regions_accounted_for() {
-  // TODO: regions accounting for young/survivor/tenured
-  return true;
-}
-
-bool G1CollectedHeap::print_region_accounting_info() {
-  gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
-                         free_regions(),
-                         count_free_regions(), count_free_regions_list(),
-                         _free_region_list_size, _unclean_region_list.sz());
-  gclog_or_tty->print_cr("cur_alloc: %d.",
-                         (_cur_alloc_region == NULL ? 0 : 1));
-  gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
-
-  // TODO: check regions accounting for young/survivor/tenured
-  return true;
-}
+#ifdef ASSERT
 
 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
   HeapRegion* hr = heap_region_containing(p);
@@ -5747,8 +5438,84 @@
     return hr->is_in(p);
   }
 }
-#endif // !PRODUCT
-
-void G1CollectedHeap::g1_unimplemented() {
-  // Unimplemented();
+#endif // ASSERT
+
+class VerifyRegionListsClosure : public HeapRegionClosure {
+private:
+  HumongousRegionSet* _humongous_set;
+  FreeRegionList*     _free_list;
+  size_t              _region_count;
+
+public:
+  VerifyRegionListsClosure(HumongousRegionSet* humongous_set,
+                           FreeRegionList* free_list) :
+    _humongous_set(humongous_set), _free_list(free_list),
+    _region_count(0) { }
+
+  size_t region_count()      { return _region_count;      }
+
+  bool doHeapRegion(HeapRegion* hr) {
+    _region_count += 1;
+
+    if (hr->continuesHumongous()) {
+      return false;
+    }
+
+    if (hr->is_young()) {
+      // TODO
+    } else if (hr->startsHumongous()) {
+      _humongous_set->verify_next_region(hr);
+    } else if (hr->is_empty()) {
+      _free_list->verify_next_region(hr);
+    }
+    return false;
+  }
+};
+
+void G1CollectedHeap::verify_region_sets() {
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
+
+  // First, check the explicit lists.
+  _free_list.verify();
+  {
+    // Given that a concurrent operation might be adding regions to
+    // the secondary free list we have to take the lock before
+    // verifying it.
+    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+    _secondary_free_list.verify();
+  }
+  _humongous_set.verify();
+
+  // If a concurrent region freeing operation is in progress it will
+  // be difficult to correctly attribute any free regions we come
+  // across to the correct free list given that they might belong to
+  // one of several (free_list, secondary_free_list, any local lists,
+  // etc.). So, if that's the case we will skip the rest of the
+  // verification operation. Alternatively, waiting for the concurrent
+  // operation to complete will have a non-trivial effect on the GC's
+  // operation (no concurrent operation will last longer than the
+  // interval between two calls to verification) and it might hide
+  // any issues that we would like to catch during testing.
+  if (free_regions_coming()) {
+    return;
+  }
+
+  {
+    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+    // Make sure we append the secondary_free_list on the free_list so
+    // that all free regions we will come across can be safely
+    // attributed to the free_list.
+    append_secondary_free_list();
+  }
+
+  // Finally, make sure that the region accounting in the lists is
+  // consistent with what we see in the heap.
+  _humongous_set.verify_start();
+  _free_list.verify_start();
+
+  VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
+  heap_region_iterate(&cl);
+
+  _humongous_set.verify_end();
+  _free_list.verify_end();
 }
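
The verify_start() / verify_next_region() / verify_end() calls above imply a simple counting protocol per set: reset a walk counter, check each region the heap walk attributes to the set, then compare the counter against the length the set maintains incrementally. A hedged sketch of such a verifier (hypothetical and much simplified; the real code lives in heapRegionSets.hpp, which is not part of this excerpt):

    #include <cassert>
    #include <cstddef>

    // Hypothetical, simplified region-set verifier.
    class RegionSetVerifier {
      size_t _length;        // maintained by the set's add/remove operations
      size_t _verify_count;  // regions attributed to this set during a heap walk
    public:
      RegionSetVerifier() : _length(0), _verify_count(0) { }

      void verify_start() { _verify_count = 0; }

      void verify_next_region() {
        // The real version would also assert per-region invariants here,
        // e.g. that the region's containing_set() is this set.
        _verify_count += 1;
      }

      void verify_end() {
        // The incrementally-maintained length must agree with the walk.
        assert(_verify_count == _length && "set length out of sync with heap");
      }
    };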
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
 #include "memory/barrierSet.hpp"
 #include "memory/memRegion.hpp"
@@ -66,8 +66,7 @@
 enum G1GCThreadGroups {
   G1CRGroup = 0,
   G1ZFGroup = 1,
-  G1CMGroup = 2,
-  G1CLGroup = 3
+  G1CMGroup = 2
 };
 
 enum GCAllocPurpose {
@@ -155,6 +154,7 @@
   friend class RefineCardTableEntryClosure;
   friend class G1PrepareCompactClosure;
   friend class RegionSorter;
+  friend class RegionResetter;
   friend class CountRCClosure;
   friend class EvacPopObjClosure;
   friend class G1ParCleanupCTTask;
@@ -178,17 +178,20 @@
   // The maximum part of _g1_storage that has ever been committed.
   MemRegion _g1_max_committed;
 
-  // The number of regions that are completely free.
-  size_t _free_regions;
+  // The master free list. It will satisfy all new region allocations.
+  MasterFreeRegionList      _free_list;
+
+  // The secondary free list which contains regions that have been
+  // freed up during the cleanup process. This will be appended to the
+  // master free list when appropriate.
+  SecondaryFreeRegionList   _secondary_free_list;
+
+  // It keeps track of the humongous regions.
+  MasterHumongousRegionSet  _humongous_set;
 
   // The number of regions we could create by expansion.
   size_t _expansion_regions;
 
-  // Return the number of free regions in the heap (by direct counting.)
-  size_t count_free_regions();
-  // Return the number of free regions on the free and unclean lists.
-  size_t count_free_regions_list();
-
   // The block offset table for the G1 heap.
   G1BlockOffsetSharedArray* _bot_shared;
 
@@ -196,9 +199,6 @@
   // lists, before and after full GC.
   void tear_down_region_lists();
   void rebuild_region_lists();
-  // This sets all non-empty regions to need zero-fill (which they will if
-  // they are empty after full collection.)
-  void set_used_regions_to_need_zero_fill();
 
   // The sequence of all heap regions in the heap.
   HeapRegionSeq* _hrs;
@@ -231,7 +231,7 @@
   // Determines PLAB size for a particular allocation purpose.
   static size_t desired_plab_sz(GCAllocPurpose purpose);
 
-  // When called by par thread, require par_alloc_during_gc_lock() to be held.
+  // When called by par thread, requires the FreeList_lock to be held.
   void push_gc_alloc_region(HeapRegion* hr);
 
   // This should only be called single-threaded.  Undeclares all GC alloc
@@ -294,10 +294,11 @@
   // line number, file, etc.
 
 #define heap_locking_asserts_err_msg(__extra_message)                         \
-  err_msg("%s : Heap_lock %slocked, %sat a safepoint",                        \
+  err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",    \
           (__extra_message),                                                  \
-          (!Heap_lock->owned_by_self()) ? "NOT " : "",                        \
-          (!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")
+          BOOL_TO_STR(Heap_lock->owned_by_self()),                            \
+          BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),               \
+          BOOL_TO_STR(Thread::current()->is_VM_thread()))
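
The rewritten message relies on BOOL_TO_STR to render each condition as text. HotSpot defines it in utilities/globalDefinitions.hpp essentially as the obvious ternary, shown here for reference (paraphrased, not part of this diff):

    #define BOOL_TO_STR(b) ((b) ? "true" : "false")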
 
 #define assert_heap_locked()                                                  \
   do {                                                                        \
@@ -305,10 +306,11 @@
            heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
   } while (0)
 
-#define assert_heap_locked_or_at_safepoint()                                  \
+#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread)             \
   do {                                                                        \
     assert(Heap_lock->owned_by_self() ||                                      \
-                                     SafepointSynchronize::is_at_safepoint(), \
+           (SafepointSynchronize::is_at_safepoint() &&                        \
+             ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
            heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
                                         "should be at a safepoint"));         \
   } while (0)
@@ -335,9 +337,10 @@
                                    "should not be at a safepoint"));          \
   } while (0)
 
-#define assert_at_safepoint()                                                 \
+#define assert_at_safepoint(__should_be_vm_thread)                            \
   do {                                                                        \
-    assert(SafepointSynchronize::is_at_safepoint(),                           \
+    assert(SafepointSynchronize::is_at_safepoint() &&                         \
+              ((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \
            heap_locking_asserts_err_msg("should be at a safepoint"));         \
   } while (0)
 
@@ -362,31 +365,41 @@
   // The current policy object for the collector.
   G1CollectorPolicy* _g1_policy;
 
-  // Parallel allocation lock to protect the current allocation region.
-  Mutex  _par_alloc_during_gc_lock;
-  Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; }
+  // This is the second level of trying to allocate a new region. If
+  // new_region_work didn't find a region in the free_list, this call
+  // will check whether there's anything available in the
+  // secondary_free_list and/or wait for more regions to appear in that
+  // list, if _free_regions_coming is set.
+  HeapRegion* new_region_try_secondary_free_list(size_t word_size);
 
-  // If possible/desirable, allocate a new HeapRegion for normal object
-  // allocation sufficient for an allocation of the given "word_size".
-  // If "do_expand" is true, will attempt to expand the heap if necessary
-  // to to satisfy the request.  If "zero_filled" is true, requires a
-  // zero-filled region.
-  // (Returning NULL will trigger a GC.)
-  virtual HeapRegion* newAllocRegion_work(size_t word_size,
-                                          bool do_expand,
-                                          bool zero_filled);
+  // It will try to allocate a single non-humongous HeapRegion
+  // sufficient for an allocation of the given word_size.  If
+  // do_expand is true, it will attempt to expand the heap if
+  // necessary to satisfy the allocation request. Note that word_size
+  // is only used to make sure that we expand sufficiently but, given
+  // that the allocation request is assumed not to be humongous,
+  // having word_size is not strictly necessary (expanding by a single
+  // region will always be sufficient). But let's keep that parameter
+  // in case we need it in the future.
+  HeapRegion* new_region_work(size_t word_size, bool do_expand);
 
-  virtual HeapRegion* newAllocRegion(size_t word_size,
-                                     bool zero_filled = true) {
-    return newAllocRegion_work(word_size, false, zero_filled);
+  // It will try to allocate a new region to be used for allocation by
+  // mutator threads. It will not try to expand the heap if no region
+  // is available.
+  HeapRegion* new_alloc_region(size_t word_size) {
+    return new_region_work(word_size, false /* do_expand */);
   }
-  virtual HeapRegion* newAllocRegionWithExpansion(int purpose,
-                                                  size_t word_size,
-                                                  bool zero_filled = true);
+
+  // It will try to allocate a new region to be used for allocation by
+  // a GC thread. It will try to expand the heap if no region is
+  // available.
+  HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
+
+  int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
 
   // Attempt to allocate an object of the given (very large) "word_size".
   // Returns "NULL" on failure.
-  virtual HeapWord* humongous_obj_allocate(size_t word_size);
+  HeapWord* humongous_obj_allocate(size_t word_size);
 
   // The following two methods, allocate_new_tlab() and
   // mem_allocate(), are the two main entry points from the runtime
@@ -430,7 +443,8 @@
                                  bool*  gc_overhead_limit_was_exceeded);
 
   // The following methods, allocate_from_cur_allocation_region(),
-  // attempt_allocation(), replace_cur_alloc_region_and_allocate(),
+  // attempt_allocation(), attempt_allocation_locked(),
+  // replace_cur_alloc_region_and_allocate(),
   // attempt_allocation_slow(), and attempt_allocation_humongous()
   // have very awkward pre- and post-conditions with respect to
   // locking:
@@ -481,20 +495,30 @@
   // successfully manage to allocate it, or NULL.
 
   // It tries to satisfy an allocation request out of the current
-  // allocating region, which is passed as a parameter. It assumes
-  // that the caller has checked that the current allocating region is
-  // not NULL. Given that the caller has to check the current
-  // allocating region for at least NULL, it might as well pass it as
-  // the first parameter so that the method doesn't have to read it
-  // from the _cur_alloc_region field again.
+  // alloc region, which is passed as a parameter. It assumes that the
+  // caller has checked that the current alloc region is not NULL.
+  // Given that the caller has to check the current alloc region for
+  // at least NULL, it might as well pass it as the first parameter so
+  // that the method doesn't have to read it from the
+  // _cur_alloc_region field again. It is called from both
+  // attempt_allocation() and attempt_allocation_locked() and the
+  // with_heap_lock parameter indicates whether the caller was holding
+  // the heap lock when it called it or not.
   inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                  size_t word_size);
+                                                  size_t word_size,
+                                                  bool with_heap_lock);
 
-  // It attempts to allocate out of the current alloc region. If that
-  // fails, it retires the current alloc region (if there is one),
-  // tries to get a new one and retries the allocation.
+  // First-level of allocation slow path: it attempts to allocate out
+  // of the current alloc region in a lock-free manner using a CAS. If
+  // that fails it takes the Heap_lock and calls
+  // attempt_allocation_locked() for the second-level slow path.
   inline HeapWord* attempt_allocation(size_t word_size);
 
+  // Second-level of allocation slow path: while holding the Heap_lock
+  // it tries to allocate out of the current alloc region and, if that
+  // fails, tries to allocate out of a new current alloc region.
+  inline HeapWord* attempt_allocation_locked(size_t word_size);
+
   // It assumes that the current alloc region has been retired and
   // tries to allocate a new one. If it's successful, it performs the
   // allocation out of the new current alloc region and updates
@@ -506,11 +530,11 @@
                                                   bool do_dirtying,
                                                   bool can_expand);
 
-  // The slow path when we are unable to allocate a new current alloc
-  // region to satisfy an allocation request (i.e., when
-  // attempt_allocation() fails). It will try to do an evacuation
-  // pause, which might stall due to the GC locker, and retry the
-  // allocation attempt when appropriate.
+  // Third-level of allocation slow path: when we are unable to
+  // allocate a new current alloc region to satisfy an allocation
+  // request (i.e., when attempt_allocation_locked() fails). It will
+  // try to do an evacuation pause, which might stall due to the GC
+  // locker, and retry the allocation attempt when appropriate.
   HeapWord* attempt_allocation_slow(size_t word_size);
 
   // The method that tries to satisfy a humongous allocation
@@ -643,16 +667,16 @@
   // can happen in a nested fashion, i.e., we start a concurrent
   // cycle, a Full GC happens half-way through it which ends first,
   // and then the cycle notices that a Full GC happened and ends
-  // too. The outer parameter is a boolean to help us do a bit tighter
-  // consistency checking in the method. If outer is false, the caller
-  // is the inner caller in the nesting (i.e., the Full GC). If outer
-  // is true, the caller is the outer caller in this nesting (i.e.,
-  // the concurrent cycle). Further nesting is not currently
-  // supported. The end of the this call also notifies the
-  // FullGCCount_lock in case a Java thread is waiting for a full GC
-  // to happen (e.g., it called System.gc() with
+  // too. The concurrent parameter is a boolean to help us do a bit
+  // tighter consistency checking in the method. If concurrent is
+  // false, the caller is the inner caller in the nesting (i.e., the
+  // Full GC). If concurrent is true, the caller is the outer caller
+  // in this nesting (i.e., the concurrent cycle). Further nesting is
+  // not currently supported. The end of this call also notifies
+  // the FullGCCount_lock in case a Java thread is waiting for a full
+  // GC to happen (e.g., it called System.gc() with
   // +ExplicitGCInvokesConcurrent).
-  void increment_full_collections_completed(bool outer);
+  void increment_full_collections_completed(bool concurrent);
 
   unsigned int full_collections_completed() {
     return _full_collections_completed;
@@ -749,20 +773,29 @@
   // Invoke "save_marks" on all heap regions.
   void save_marks();
 
-  // Free a heap region.
-  void free_region(HeapRegion* hr);
-  // A component of "free_region", exposed for 'batching'.
-  // All the params after "hr" are out params: the used bytes of the freed
-  // region(s), the number of H regions cleared, the number of regions
-  // freed, and pointers to the head and tail of a list of freed contig
-  // regions, linked throught the "next_on_unclean_list" field.
-  void free_region_work(HeapRegion* hr,
-                        size_t& pre_used,
-                        size_t& cleared_h,
-                        size_t& freed_regions,
-                        UncleanRegionList* list,
-                        bool par = false);
+  // It frees a non-humongous region by initializing its contents and
+  // adding it to the free list that's passed as a parameter (this is
+  // usually a local list which will be appended to the master free
+  // list later). The used bytes of freed regions are accumulated in
+  // pre_used. If par is true, the region's RSet will not be freed
+  // up. The assumption is that this will be done later.
+  void free_region(HeapRegion* hr,
+                   size_t* pre_used,
+                   FreeRegionList* free_list,
+                   bool par);
 
+  // It frees a humongous region by collapsing it into individual
+  // regions and calling free_region() for each of them. The freed
+  // regions will be added to the free list that's passed as a parameter
+  // (this is usually a local list which will be appended to the
+  // master free list later). The used bytes of freed regions are
+  // accumulated in pre_used. If par is true, the region's RSet will
+  // not be freed up. The assumption is that this will be done later.
+  void free_humongous_region(HeapRegion* hr,
+                             size_t* pre_used,
+                             FreeRegionList* free_list,
+                             HumongousRegionSet* humongous_proxy_set,
+                             bool par);
 
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
@@ -772,9 +805,6 @@
   // The concurrent refiner.
   ConcurrentG1Refine* _cg1r;
 
-  // The concurrent zero-fill thread.
-  ConcurrentZFThread* _czft;
-
   // The parallel task queues
   RefToScanQueueSet *_task_queues;
 
@@ -826,7 +856,6 @@
   void finalize_for_evac_failure();
 
   // An attempt to evacuate "obj" has failed; take necessary steps.
-  void handle_evacuation_failure(oop obj);
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
@@ -849,6 +878,12 @@
   void print_gc_alloc_regions();
 #endif // !PRODUCT
 
+  // Instance of the concurrent mark is_alive closure for embedding
+  // into the reference processor as the is_alive_non_header. This
+  // prevents unnecessary additions to the discovered lists during
+  // concurrent discovery.
+  G1CMIsAliveClosure _is_alive_closure;
+
   // ("Weak") Reference processing support
   ReferenceProcessor* _ref_processor;
 
@@ -861,9 +896,7 @@
 
   SubTasksDone* _process_strong_tasks;
 
-  // List of regions which require zero filling.
-  UncleanRegionList _unclean_region_list;
-  bool _unclean_regions_coming;
+  volatile bool _free_regions_coming;
 
 public:
 
@@ -893,7 +926,7 @@
   // specified by the policy object.
   jint initialize();
 
-  void ref_processing_init();
+  virtual void ref_processing_init();
 
   void set_par_threads(int t) {
     SharedHeap::set_par_threads(t);
@@ -986,71 +1019,64 @@
   size_t max_regions();
 
   // The number of regions that are completely free.
-  size_t free_regions();
+  size_t free_regions() {
+    return _free_list.length();
+  }
 
   // The number of regions that are not completely free.
   size_t used_regions() { return n_regions() - free_regions(); }
 
-  // True iff the ZF thread should run.
-  bool should_zf();
-
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
-#ifndef PRODUCT
-  bool regions_accounted_for();
-  bool print_region_accounting_info();
-  void print_region_counts();
-#endif
-
-  HeapRegion* alloc_region_from_unclean_list(bool zero_filled);
-  HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled);
-
-  void put_region_on_unclean_list(HeapRegion* r);
-  void put_region_on_unclean_list_locked(HeapRegion* r);
+  // verify_region_sets() performs verification over the region
+  // lists. It is compiled into product code so that it can be used
+  // when necessary (i.e., during heap verification).
+  void verify_region_sets();
 
-  void prepend_region_list_on_unclean_list(UncleanRegionList* list);
-  void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list);
+  // verify_region_sets_optional() is planted in the code for
+  // list verification in non-product builds (and it can be enabled in
+  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
+#if HEAP_REGION_SET_FORCE_VERIFY
+  void verify_region_sets_optional() {
+    verify_region_sets();
+  }
+#else // HEAP_REGION_SET_FORCE_VERIFY
+  void verify_region_sets_optional() { }
+#endif // HEAP_REGION_SET_FORCE_VERIFY
 
-  void set_unclean_regions_coming(bool b);
-  void set_unclean_regions_coming_locked(bool b);
-  // Wait for cleanup to be complete.
-  void wait_for_cleanup_complete();
-  // Like above, but assumes that the calling thread owns the Heap_lock.
-  void wait_for_cleanup_complete_locked();
-
-  // Return the head of the unclean list.
-  HeapRegion* peek_unclean_region_list_locked();
-  // Remove and return the head of the unclean list.
-  HeapRegion* pop_unclean_region_list_locked();
+#ifdef ASSERT
+  bool is_on_free_list(HeapRegion* hr) {
+    return hr->containing_set() == &_free_list;
+  }
 
-  // List of regions which are zero filled and ready for allocation.
-  HeapRegion* _free_region_list;
-  // Number of elements on the free list.
-  size_t _free_region_list_size;
+  bool is_on_humongous_set(HeapRegion* hr) {
+    return hr->containing_set() == &_humongous_set;
+  }
+#endif // ASSERT
 
-  // If the head of the unclean list is ZeroFilled, move it to the free
-  // list.
-  bool move_cleaned_region_to_free_list_locked();
-  bool move_cleaned_region_to_free_list();
+  // Wrapper for the region list operations that can be called from
+  // methods outside this class.
 
-  void put_free_region_on_list_locked(HeapRegion* r);
-  void put_free_region_on_list(HeapRegion* r);
+  void secondary_free_list_add_as_tail(FreeRegionList* list) {
+    _secondary_free_list.add_as_tail(list);
+  }
 
-  // Remove and return the head element of the free list.
-  HeapRegion* pop_free_region_list_locked();
+  void append_secondary_free_list() {
+    _free_list.add_as_tail(&_secondary_free_list);
+  }
 
-  // If "zero_filled" is true, we first try the free list, then we try the
-  // unclean list, zero-filling the result.  If "zero_filled" is false, we
-  // first try the unclean list, then the zero-filled list.
-  HeapRegion* alloc_free_region_from_lists(bool zero_filled);
+  void append_secondary_free_list_if_not_empty() {
+    if (!_secondary_free_list.is_empty()) {
+      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+      append_secondary_free_list();
+    }
+  }
 
-  // Verify the integrity of the region lists.
-  void remove_allocated_regions_from_lists();
-  bool verify_region_lists();
-  bool verify_region_lists_locked();
-  size_t unclean_region_list_length();
-  size_t free_region_list_length();
+  void set_free_regions_coming();
+  void reset_free_regions_coming();
+  bool free_regions_coming() { return _free_regions_coming; }
+  void wait_while_free_regions_coming();
 
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc".  This probably implies as full a collection as the
@@ -1069,23 +1095,24 @@
   // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
 
-  // Free a region if it is totally full of garbage.  Returns the number of
-  // bytes freed (0 ==> didn't free it).
-  size_t free_region_if_totally_empty(HeapRegion *hr);
-  void free_region_if_totally_empty_work(HeapRegion *hr,
-                                         size_t& pre_used,
-                                         size_t& cleared_h_regions,
-                                         size_t& freed_regions,
-                                         UncleanRegionList* list,
-                                         bool par = false);
+  // It will free a region if it has allocated objects in it that are
+  // all dead. It calls either free_region() or
+  // free_humongous_region() depending on the type of the region that
+  // is passed to it.
+  void free_region_if_totally_empty(HeapRegion* hr,
+                                    size_t* pre_used,
+                                    FreeRegionList* free_list,
+                                    HumongousRegionSet* humongous_proxy_set,
+                                    bool par);
 
-  // If we've done free region work that yields the given changes, update
-  // the relevant global variables.
-  void finish_free_region_work(size_t pre_used,
-                               size_t cleared_h_regions,
-                               size_t freed_regions,
-                               UncleanRegionList* list);
-
+  // It appends the free list to the master free list and updates the
+  // master humongous list according to the contents of the proxy
+  // list. It also adjusts the total used bytes according to pre_used
+  // (if par is true, it will do so by taking the ParGCRareEvent_lock).
+  void update_sets_after_freeing_regions(size_t pre_used,
+                                         FreeRegionList* free_list,
+                                         HumongousRegionSet* humongous_proxy_set,
+                                         bool par);
 
   // Returns "TRUE" iff "p" points into the allocated area of the heap.
   virtual bool is_in(const void* p) const;
@@ -1298,8 +1325,6 @@
     return true;
   }
 
-  virtual bool allocs_are_zero_filled();
-
   // The boundary between a "large" and "small" array of primitives, in
   // words.
   virtual size_t large_typearray_limit();
@@ -1530,13 +1555,6 @@
 
 protected:
   size_t _max_heap_capacity;
-
-public:
-  // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
-  // MemoryService).  In productization, we can make this assert false
-  // to catch such places (as well as searching for calls to this...)
-  static void g1_unimplemented();
-
 };
 
 #define use_local_bitmaps         1
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "utilities/taskqueue.hpp"
 
 // Inline functions for G1CollectedHeap
@@ -63,10 +63,12 @@
 // assumptions of this method (and other related ones).
 inline HeapWord*
 G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
+                                                size_t word_size,
+                                                bool with_heap_lock) {
+  assert_not_at_safepoint();
+  assert(with_heap_lock == Heap_lock->owned_by_self(),
+         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
   assert(cur_alloc_region != NULL, "pre-condition of the method");
-  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
   assert(cur_alloc_region->is_young(),
          "we only support young current alloc regions");
   assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
@@ -76,20 +78,24 @@
   assert(!cur_alloc_region->is_empty(),
          err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
                  cur_alloc_region->bottom(), cur_alloc_region->end()));
-  // This allocate method does BOT updates and we don't need them in
-  // the young generation. This will be fixed in the near future by
-  // CR 6994297.
-  HeapWord* result = cur_alloc_region->allocate(word_size);
+  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
   if (result != NULL) {
     assert(is_in(result), "result should be in the heap");
-    Heap_lock->unlock();
 
+    if (with_heap_lock) {
+      Heap_lock->unlock();
+    }
+    assert_heap_not_locked();
     // Do the dirtying after we release the Heap_lock.
     dirty_young_block(result, word_size);
     return result;
   }
 
-  assert_heap_locked();
+  if (with_heap_lock) {
+    assert_heap_locked();
+  } else {
+    assert_heap_not_locked();
+  }
   return NULL;
 }
 
@@ -97,26 +103,75 @@
 // assumptions of this method (and other related ones).
 inline HeapWord*
 G1CollectedHeap::attempt_allocation(size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
+  assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not be called "
          "for humongous allocation requests");
 
   HeapRegion* cur_alloc_region = _cur_alloc_region;
   if (cur_alloc_region != NULL) {
     HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
-                                                      word_size);
+                                                   word_size,
+                                                   false /* with_heap_lock */);
+    assert_heap_not_locked();
+    if (result != NULL) {
+      return result;
+    }
+  }
+
+  // Our attempt to allocate lock-free failed as the current
+  // allocation region is either NULL or full. So, we'll now take the
+  // Heap_lock and retry.
+  Heap_lock->lock();
+
+  HeapWord* result = attempt_allocation_locked(word_size);
+  if (result != NULL) {
+    assert_heap_not_locked();
+    return result;
+  }
+
+  assert_heap_locked();
+  return NULL;
+}
+
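
attempt_allocation() above first tries the current alloc region with no lock held; as the comments earlier in this changeset note, par_allocate_no_bot_updates() is a CAS-based pointer bump, and only when it fails does the caller take Heap_lock and fall through to attempt_allocation_locked(). A standalone sketch of CAS bump-pointer allocation (hypothetical names; the real HeapRegion code also handles BOT updates and alignment):

    #include <atomic>
    #include <cstddef>

    typedef size_t HeapWord;   // stand-in for HotSpot's HeapWord

    // Lock-free pointer-bump allocation; retried until the CAS wins
    // or the region runs out of space.
    HeapWord* par_allocate(std::atomic<HeapWord*>& top, HeapWord* end,
                           size_t word_size) {
      for (;;) {
        HeapWord* old_top = top.load();
        if ((size_t)(end - old_top) < word_size) {
          return NULL;                   // full: caller takes the Heap_lock slow path
        }
        HeapWord* new_top = old_top + word_size;
        if (top.compare_exchange_weak(old_top, new_top)) {
          return old_top;                // this thread owns [old_top, new_top)
        }
        // CAS lost to another thread: loop and retry with the new top.
      }
    }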
+inline void
+G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
+  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
+         "pre-condition of the call");
+  assert(cur_alloc_region->is_young(),
+         "we only support young current alloc regions");
+
+  // The region is guaranteed to be young
+  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
+  _summary_bytes_used += cur_alloc_region->used();
+  _cur_alloc_region = NULL;
+}
+
+inline HeapWord*
+G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
+  assert_heap_locked_and_not_at_safepoint();
+  assert(!isHumongous(word_size), "attempt_allocation_locked() "
+         "should not be called for humongous allocation requests");
+
+  // First, reread the current alloc region and retry the allocation
+  // in case somebody replaced it while we were waiting to get the
+  // Heap_lock.
+  HeapRegion* cur_alloc_region = _cur_alloc_region;
+  if (cur_alloc_region != NULL) {
+    HeapWord* result = allocate_from_cur_alloc_region(
+                                                  cur_alloc_region, word_size,
+                                                  true /* with_heap_lock */);
     if (result != NULL) {
       assert_heap_not_locked();
       return result;
     }
 
-    assert_heap_locked();
-
-    // Since we couldn't successfully allocate into it, retire the
-    // current alloc region.
+    // We failed to allocate out of the current alloc region, so let's
+    // retire it before getting a new one.
     retire_cur_alloc_region(cur_alloc_region);
   }
 
+  assert_heap_locked();
   // Try to get a new region and allocate out of it
   HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
                                                      false, /* at_safepoint */
@@ -131,20 +186,6 @@
   return NULL;
 }
 
-inline void
-G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
-  assert_heap_locked_or_at_safepoint();
-  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
-         "pre-condition of the call");
-  assert(cur_alloc_region->is_young(),
-         "we only support young current alloc regions");
-
-  // The region is guaranteed to be young
-  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
-  _summary_bytes_used += cur_alloc_region->used();
-  _cur_alloc_region = NULL;
-}
-
 // It dirties the cards that cover the block so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2875,8 +2875,6 @@
   // Adjust for expansion and slop.
   max_live_bytes = max_live_bytes + expansion_bytes;
 
-  assert(_g1->regions_accounted_for(), "Region leakage!");
-
   HeapRegion* hr;
   if (in_young_gc_mode()) {
     double young_start_time_sec = os::elapsedTime();
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -113,6 +113,7 @@
 
   Threads::gc_epilogue();
   CodeCache::gc_epilogue();
+  JvmtiExport::gc_epilogue();
 
   // refs processing: clean slate
   GenMarkSweep::_ref_processor = NULL;
@@ -180,26 +181,46 @@
 }
 
 class G1PrepareCompactClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   ModRefBarrierSet* _mrbs;
   CompactPoint _cp;
+  size_t _pre_used;
+  FreeRegionList _free_list;
+  HumongousRegionSet _humongous_proxy_set;
 
   void free_humongous_region(HeapRegion* hr) {
-    HeapWord* bot = hr->bottom();
     HeapWord* end = hr->end();
     assert(hr->startsHumongous(),
            "Only the start of a humongous region should be freed.");
-    G1CollectedHeap::heap()->free_region(hr);
+    _g1h->free_humongous_region(hr, &_pre_used, &_free_list,
+                                &_humongous_proxy_set, false /* par */);
+    // Do we also need to do this for the continues humongous regions
+    // we just collapsed?
     hr->prepare_for_compaction(&_cp);
     // Also clear the part of the card table that will be unused after
     // compaction.
-    _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
+    _mrbs->clear(MemRegion(hr->compaction_top(), end));
   }
 
 public:
-  G1PrepareCompactClosure(CompactibleSpace* cs) :
+  G1PrepareCompactClosure(CompactibleSpace* cs)
+  : _g1h(G1CollectedHeap::heap()),
+    _mrbs(G1CollectedHeap::heap()->mr_bs()),
     _cp(NULL, cs, cs->initialize_threshold()),
-    _mrbs(G1CollectedHeap::heap()->mr_bs())
-  {}
+    _pre_used(0),
+    _free_list("Local Free List for G1MarkSweep"),
+    _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
+
+  void update_sets() {
+    // We'll recalculate total used bytes and recreate the free list
+    // at the end of the GC, so no point in updating those values here.
+    _g1h->update_sets_after_freeing_regions(0, /* pre_used */
+                                            NULL, /* free_list */
+                                            &_humongous_proxy_set,
+                                            false /* par */);
+    _free_list.remove_all();
+  }
+
   bool doHeapRegion(HeapRegion* hr) {
     if (hr->isHumongous()) {
       if (hr->startsHumongous()) {
@@ -265,6 +286,7 @@
 
   G1PrepareCompactClosure blk(sp);
   g1h->heap_region_iterate(&blk);
+  blk.update_sets();
 
   CompactPoint perm_cp(pg, NULL, NULL);
   pg->prepare_for_compaction(&perm_cp);
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,21 +75,12 @@
           "(0 means do not periodically generate this info); "              \
           "it also requires -XX:+G1SummarizeRSetStats")                     \
                                                                             \
-  diagnostic(bool, G1SummarizeZFStats, false,                               \
-          "Summarize zero-filling info")                                    \
-                                                                            \
   diagnostic(bool, G1TraceConcRefinement, false,                            \
           "Trace G1 concurrent refinement")                                 \
                                                                             \
   product(intx, G1MarkRegionStackSize, 1024 * 1024,                         \
           "Size of the region stack for concurrent marking.")               \
                                                                             \
-  develop(bool, G1ConcZeroFill, true,                                       \
-          "If true, run concurrent zero-filling thread")                    \
-                                                                            \
-  develop(intx, G1ConcZFMaxRegions, 1,                                      \
-          "Stop zero-filling when # of zf'd regions reaches")               \
-                                                                            \
   develop(bool, G1SATBBarrierPrintNullPreVals, false,                       \
           "If true, count frac of ptr writes with null pre-vals.")          \
                                                                             \
@@ -99,6 +90,13 @@
   develop(intx, G1SATBProcessCompletedThreshold, 20,                        \
           "Number of completed buffers that triggers log processing.")      \
                                                                             \
+  product(uintx, G1SATBBufferEnqueueingThresholdPercent, 60,                \
+          "Before enqueueing them, each mutator thread tries to do some "   \
+          "filtering on the SATB buffers it generates. If post-filtering "  \
+          "the percentage of retained entries is over this threshold "      \
+          "the buffer will be enqueued for processing. A value of 0 "       \
+          "specifies that mutator threads should not do such filtering.")   \
+                                                                            \
   develop(intx, G1ExtraRegionSurvRate, 33,                                  \
           "If the young survival rate is S, and there's room left in "      \
           "to-space, we will allow regions whose survival rate is up to "   \
@@ -282,7 +280,20 @@
           "Size of a work unit of cards claimed by a worker thread"         \
           "during RSet scanning.")                                          \
                                                                             \
-  develop(bool, ReduceInitialCardMarksForG1, false,                         \
+  develop(uintx, G1SecondaryFreeListAppendLength, 5,                        \
+          "The number of regions we will add to the secondary free list "   \
+          "at every append operation")                                      \
+                                                                            \
+  develop(bool, G1ConcRegionFreeingVerbose, false,                          \
+          "Enables verboseness during concurrent region freeing")           \
+                                                                            \
+  develop(bool, G1StressConcRegionFreeing, false,                           \
+          "It stresses the concurrent region freeing operation")            \
+                                                                            \
+  develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                   \
+          "Artificial delay during concurrent region freeing")              \
+                                                                            \
+  develop(bool, ReduceInitialCardMarksForG1, false,                         \
           "When ReduceInitialCardMarks is true, this flag setting "         \
           " controls whether G1 allows the RICM optimization")
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/g1/concurrentZFThread.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
@@ -348,22 +347,20 @@
 }
 
 void HeapRegion::hr_clear(bool par, bool clear_space) {
-  _humongous_type = NotHumongous;
-  _humongous_start_region = NULL;
+  assert(_humongous_type == NotHumongous,
+         "we should have already filtered out humongous regions");
+  assert(_humongous_start_region == NULL,
+         "we should have already filtered out humongous regions");
+  assert(_end == _orig_end,
+         "we should have already filtered out humongous regions");
+
   _in_collection_set = false;
   _is_gc_alloc_region = false;
 
-  // Age stuff (if parallel, this will be done separately, since it needs
-  // to be sequential).
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
   set_young_type(NotYoung);
 
-  // In case it had been the start of a humongous sequence, reset its end.
-  set_end(_orig_end);
-
   if (!par) {
     // If this is parallel, this will be done later.
     HeapRegionRemSet* hrrs = rem_set();
@@ -386,26 +383,49 @@
 }
 // </PREDICTION>
 
-void HeapRegion::set_startsHumongous(HeapWord* new_end) {
+void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
+  assert(!isHumongous(), "sanity / pre-condition");
   assert(end() == _orig_end,
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
+  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
 
   _humongous_type = StartsHumongous;
   _humongous_start_region = this;
 
   set_end(new_end);
-  _offsets.set_for_starts_humongous(new_end);
+  _offsets.set_for_starts_humongous(new_top);
 }
 
-void HeapRegion::set_continuesHumongous(HeapRegion* start) {
+void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
+  assert(!isHumongous(), "sanity / pre-condition");
   assert(end() == _orig_end,
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
-  assert(start->startsHumongous(), "pre-condition");
+  assert(first_hr->startsHumongous(), "pre-condition");
 
   _humongous_type = ContinuesHumongous;
-  _humongous_start_region = start;
+  _humongous_start_region = first_hr;
+}
+
+void HeapRegion::set_notHumongous() {
+  assert(isHumongous(), "pre-condition");
+
+  if (startsHumongous()) {
+    assert(top() <= end(), "pre-condition");
+    set_end(_orig_end);
+    if (top() > end()) {
+      // at least one "continues humongous" region after it
+      set_top(end());
+    }
+  } else {
+    // continues humongous
+    assert(end() == _orig_end, "sanity");
+  }
+
+  assert(capacity() == (size_t) HeapRegion::GrainBytes, "pre-condition");
+  _humongous_type = NotHumongous;
+  _humongous_start_region = NULL;
 }
 
 bool HeapRegion::claimHeapRegion(jint claimValue) {
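
The three methods in the hunk above (set_startsHumongous(), set_continuesHumongous() and set_notHumongous()) form a small state machine over a region's humongous fields. A minimal standalone C++ sketch of that bookkeeping follows; ToyRegion and its plain size_t "addresses" are simplified stand-ins for HeapRegion and HeapWord*, and the sketch folds the caller's later set_top(new_top) into set_startsHumongous() for brevity (the patch itself defers the top update to the allocation path).

#include <cassert>
#include <cstddef>

enum HumongousType { NotHumongous, StartsHumongous, ContinuesHumongous };

struct ToyRegion {
  size_t bottom, orig_end, end, top;
  HumongousType type;
  ToyRegion* start_region;

  ToyRegion(size_t b, size_t e)
    : bottom(b), orig_end(e), end(e), top(b),
      type(NotHumongous), start_region(NULL) { }

  void set_startsHumongous(size_t new_top, size_t new_end) {
    assert(type == NotHumongous && end == orig_end && top == bottom);
    assert(bottom <= new_top && new_top <= new_end);
    type = StartsHumongous;
    start_region = this;
    end = new_end;    // stretched over the whole series
    top = new_top;    // toy shortcut, see the note above
  }

  void set_continuesHumongous(ToyRegion* first_hr) {
    assert(type == NotHumongous && first_hr->type == StartsHumongous);
    type = ContinuesHumongous;
    start_region = first_hr;
  }

  void set_notHumongous() {
    assert(type != NotHumongous);
    if (type == StartsHumongous) {
      end = orig_end;              // shrink back to a single region
      if (top > end) top = end;    // the series spilled into later regions
    }
    type = NotHumongous;
    start_region = NULL;
  }
};

int main() {
  ToyRegion first(0, 100), second(100, 200);
  first.set_startsHumongous(/* new_top */ 150, /* new_end */ 200);
  second.set_continuesHumongous(&first);
  first.set_notHumongous();    // end back to 100, top clamped to 100
  second.set_notHumongous();
  return 0;
}
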
@@ -442,15 +462,6 @@
   return low;
 }
 
-void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
-  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
-  _next_in_special_set = r;
-}
-
-void HeapRegion::set_on_unclean_list(bool b) {
-  _is_on_unclean_list = b;
-}
-
 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
   hr_clear(false/*par*/, clear_space);
@@ -468,15 +479,16 @@
     _hrs_index(-1),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false), _is_gc_alloc_region(false),
-    _is_on_free_list(false), _is_on_unclean_list(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
     _young_type(NotYoung), _next_young_region(NULL),
-    _next_dirty_cards_region(NULL),
-    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
-    _rem_set(NULL), _zfs(NotZeroFilled),
-    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
+    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
+#ifdef ASSERT
+    _containing_set(NULL),
+#endif // ASSERT
+    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
+    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
     _predicted_bytes_to_copy(0)
 {
   _orig_end = mr.end();
@@ -551,86 +563,6 @@
   oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
 }
 
-#ifdef DEBUG
-HeapWord* HeapRegion::allocate(size_t size) {
-  jint state = zero_fill_state();
-  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
-         zero_fill_is_allocated(),
-         "When ZF is on, only alloc in ZF'd regions");
-  return G1OffsetTableContigSpace::allocate(size);
-}
-#endif
-
-void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
-  assert(ZF_mon->owned_by_self() ||
-         Universe::heap()->is_gc_active(),
-         "Must hold the lock or be a full GC to modify.");
-#ifdef ASSERT
-  if (top() != bottom() && zfs != Allocated) {
-    ResourceMark rm;
-    stringStream region_str;
-    print_on(&region_str);
-    assert(top() == bottom() || zfs == Allocated,
-           err_msg("Region must be empty, or we must be setting it to allocated. "
-                   "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
-  }
-#endif
-  _zfs = zfs;
-}
-
-void HeapRegion::set_zero_fill_complete() {
-  set_zero_fill_state_work(ZeroFilled);
-  if (ZF_mon->owned_by_self()) {
-    ZF_mon->notify_all();
-  }
-}
-
-
-void HeapRegion::ensure_zero_filled() {
-  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-  ensure_zero_filled_locked();
-}
-
-void HeapRegion::ensure_zero_filled_locked() {
-  assert(ZF_mon->owned_by_self(), "Precondition");
-  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
-  assert(should_ignore_zf || Heap_lock->is_locked(),
-         "Either we're in a GC or we're allocating a region.");
-  switch (zero_fill_state()) {
-  case HeapRegion::NotZeroFilled:
-    set_zero_fill_in_progress(Thread::current());
-    {
-      ZF_mon->unlock();
-      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
-      ZF_mon->lock_without_safepoint_check();
-    }
-    // A trap.
-    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
-              && zero_filler() == Thread::current(),
-              "AHA!  Tell Dave D if you see this...");
-    set_zero_fill_complete();
-    // gclog_or_tty->print_cr("Did sync ZF.");
-    ConcurrentZFThread::note_sync_zfs();
-    break;
-  case HeapRegion::ZeroFilling:
-    if (should_ignore_zf) {
-      // We can "break" the lock and take over the work.
-      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
-      set_zero_fill_complete();
-      ConcurrentZFThread::note_sync_zfs();
-      break;
-    } else {
-      ConcurrentZFThread::wait_for_ZF_completed(this);
-    }
-  case HeapRegion::ZeroFilled:
-    // Nothing to do.
-    break;
-  case HeapRegion::Allocated:
-    guarantee(false, "Should not call on allocated regions.");
-  }
-  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
-}
-
 HeapWord*
 HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                                  ObjectClosure* cl) {
@@ -782,9 +714,6 @@
   verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
 }
 
-#define OBJ_SAMPLE_INTERVAL 0
-#define BLOCK_SAMPLE_INTERVAL 100
-
 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.
 
@@ -795,83 +724,125 @@
   *failures = false;
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
-  int objs = 0;
-  int blocks = 0;
   VerifyLiveClosure vl_cl(g1, use_prev_marking);
   bool is_humongous = isHumongous();
+  bool do_bot_verify = !is_young();
   size_t object_num = 0;
   while (p < top()) {
-    size_t size = oop(p)->size();
-    if (is_humongous != g1->isHumongous(size)) {
+    oop obj = oop(p);
+    size_t obj_size = obj->size();
+    object_num += 1;
+
+    if (is_humongous != g1->isHumongous(obj_size)) {
       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                              SIZE_FORMAT" words) in a %shumongous region",
-                             p, g1->isHumongous(size) ? "" : "non-",
-                             size, is_humongous ? "" : "non-");
+                             p, g1->isHumongous(obj_size) ? "" : "non-",
+                             obj_size, is_humongous ? "" : "non-");
        *failures = true;
+       return;
+    }
+
+    // If verify_for_object() returns false, it will output the
+    // appropriate message.
+    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
+      *failures = true;
+      return;
     }
-    object_num += 1;
-    if (blocks == BLOCK_SAMPLE_INTERVAL) {
-      HeapWord* res = block_start_const(p + (size/2));
-      if (p != res) {
-        gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
-                               SIZE_FORMAT" returned "PTR_FORMAT,
-                               p, size, res);
+
+    if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
+      if (obj->is_oop()) {
+        klassOop klass = obj->klass();
+        if (!klass->is_perm()) {
+          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
+                                 "not in perm", klass, obj);
+          *failures = true;
+          return;
+        } else if (!klass->is_klass()) {
+          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
+                                 "not a klass", klass, obj);
+          *failures = true;
+          return;
+        } else {
+          vl_cl.set_containing_obj(obj);
+          obj->oop_iterate(&vl_cl);
+          if (vl_cl.failures()) {
+            *failures = true;
+          }
+          if (G1MaxVerifyFailures >= 0 &&
+              vl_cl.n_failures() >= G1MaxVerifyFailures) {
+            return;
+          }
+        }
+      } else {
+        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
         *failures = true;
         return;
       }
-      blocks = 0;
-    } else {
-      blocks++;
-    }
-    if (objs == OBJ_SAMPLE_INTERVAL) {
-      oop obj = oop(p);
-      if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
-        if (obj->is_oop()) {
-          klassOop klass = obj->klass();
-          if (!klass->is_perm()) {
-            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                                   "not in perm", klass, obj);
-            *failures = true;
-            return;
-          } else if (!klass->is_klass()) {
-            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                                   "not a klass", klass, obj);
-            *failures = true;
-            return;
-          } else {
-            vl_cl.set_containing_obj(obj);
-            obj->oop_iterate(&vl_cl);
-            if (vl_cl.failures()) {
-              *failures = true;
-            }
-            if (G1MaxVerifyFailures >= 0 &&
-                vl_cl.n_failures() >= G1MaxVerifyFailures) {
-              return;
-            }
-          }
-        } else {
-          gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
-          *failures = true;
-          return;
-        }
-      }
-      objs = 0;
-    } else {
-      objs++;
     }
     prev_p = p;
-    p += size;
+    p += obj_size;
+  }
+
+  if (p != top()) {
+    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
+                           "does not match top "PTR_FORMAT, p, top());
+    *failures = true;
+    return;
   }
-  HeapWord* rend = end();
-  HeapWord* rtop = top();
-  if (rtop < rend) {
-    HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
-    if (res != rtop) {
-        gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
-                               PTR_FORMAT" returned "PTR_FORMAT,
-                               rtop, rend, res);
+
+  HeapWord* the_end = end();
+  assert(p == top(), "it should still hold");
+  // Do some extra BOT consistency checking for addresses in the
+  // range [top, end). BOT look-ups in this range should yield
+  // top. No point in doing that if top == end (there's nothing there).
+  if (p < the_end) {
+    // Look up top
+    HeapWord* addr_1 = p;
+    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
+    if (b_start_1 != p) {
+      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
+                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
+                             addr_1, b_start_1, p);
+      *failures = true;
+      return;
+    }
+
+    // Look up top + 1
+    HeapWord* addr_2 = p + 1;
+    if (addr_2 < the_end) {
+      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
+      if (b_start_2 != p) {
+        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
+                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
+                               addr_2, b_start_2, p);
         *failures = true;
         return;
+      }
+    }
+
+    // Look up an address between top and end
+    size_t diff = pointer_delta(the_end, p) / 2;
+    HeapWord* addr_3 = p + diff;
+    if (addr_3 < the_end) {
+      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
+      if (b_start_3 != p) {
+        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
+                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
+                               addr_3, b_start_3, p);
+        *failures = true;
+        return;
+      }
+    }
+
+    // Look up end - 1
+    HeapWord* addr_4 = the_end - 1;
+    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
+    if (b_start_4 != p) {
+      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
+                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
+                             addr_4, b_start_4, p);
+      *failures = true;
+      return;
     }
   }
 
@@ -880,12 +851,6 @@
                            "but has "SIZE_FORMAT", objects",
                            bottom(), end(), object_num);
     *failures = true;
-  }
-
-  if (p != top()) {
-    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
-                           "does not match top "PTR_FORMAT, p, top());
-    *failures = true;
     return;
   }
 }
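
The new BOT consistency checks probe four addresses in [top, end), namely top, top + 1, a midpoint, and end - 1, and require every look-up to return top. The toy below makes that contract concrete; ToyBot keys a std::map by block start instead of using a real block-offset table, and all numbers are hypothetical word indices.

#include <cassert>
#include <cstddef>
#include <map>

// Toy stand-in for the block offset table: block start -> block size.
struct ToyBot {
  std::map<size_t, size_t> blocks;   // non-overlapping, sorted by start

  void alloc_block(size_t start, size_t size) { blocks[start] = size; }

  // Start of the block covering addr (addr must lie inside some block).
  size_t block_start(size_t addr) const {
    std::map<size_t, size_t>::const_iterator it = blocks.upper_bound(addr);
    assert(it != blocks.begin());
    --it;                            // last block starting at or before addr
    assert(addr < it->first + it->second);
    return it->first;
  }
};

int main() {
  const size_t bottom = 0, top = 100, end = 128;   // toy word indices
  ToyBot bot;
  bot.alloc_block(bottom, 60);       // objects below top...
  bot.alloc_block(60, top - 60);     // ...the last one ends exactly at top
  bot.alloc_block(top, end - top);   // the [top, end) block checked above

  // The four probes from the new verification code, all must yield top:
  assert(bot.block_start(top) == top);                    // top
  assert(bot.block_start(top + 1) == top);                // top + 1
  assert(bot.block_start(top + (end - top) / 2) == top);  // a midpoint
  assert(bot.block_start(end - 1) == top);                // end - 1
  return 0;
}
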
@@ -976,67 +941,3 @@
   _offsets.set_space(this);
   initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
 }
-
-size_t RegionList::length() {
-  size_t len = 0;
-  HeapRegion* cur = hd();
-  DEBUG_ONLY(HeapRegion* last = NULL);
-  while (cur != NULL) {
-    len++;
-    DEBUG_ONLY(last = cur);
-    cur = get_next(cur);
-  }
-  assert(last == tl(), "Invariant");
-  return len;
-}
-
-void RegionList::insert_before_head(HeapRegion* r) {
-  assert(well_formed(), "Inv");
-  set_next(r, hd());
-  _hd = r;
-  _sz++;
-  if (tl() == NULL) _tl = r;
-  assert(well_formed(), "Inv");
-}
-
-void RegionList::prepend_list(RegionList* new_list) {
-  assert(well_formed(), "Precondition");
-  assert(new_list->well_formed(), "Precondition");
-  HeapRegion* new_tl = new_list->tl();
-  if (new_tl != NULL) {
-    set_next(new_tl, hd());
-    _hd = new_list->hd();
-    _sz += new_list->sz();
-    if (tl() == NULL) _tl = new_list->tl();
-  } else {
-    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
-  }
-  assert(well_formed(), "Inv");
-}
-
-void RegionList::delete_after(HeapRegion* r) {
-  assert(well_formed(), "Precondition");
-  HeapRegion* next = get_next(r);
-  assert(r != NULL, "Precondition");
-  HeapRegion* next_tl = get_next(next);
-  set_next(r, next_tl);
-  dec_sz();
-  if (next == tl()) {
-    assert(next_tl == NULL, "Inv");
-    _tl = r;
-  }
-  assert(well_formed(), "Inv");
-}
-
-HeapRegion* RegionList::pop() {
-  assert(well_formed(), "Inv");
-  HeapRegion* res = hd();
-  if (res != NULL) {
-    _hd = get_next(res);
-    _sz--;
-    set_next(res, NULL);
-    if (sz() == 0) _tl = NULL;
-  }
-  assert(well_formed(), "Inv");
-  return res;
-}
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,11 @@
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
+class HeapRegionSetBase;
+
+#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
+                               (__hr)->top(), (__hr)->end()
 
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
@@ -173,6 +178,19 @@
   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 
   virtual void print() const;
+
+  void reset_bot() {
+    _offsets.zero_bottom_entry();
+    _offsets.initialize_threshold();
+  }
+
+  void update_bot_for_object(HeapWord* start, size_t word_size) {
+    _offsets.alloc_block(start, word_size);
+  }
+
+  void print_bot_on(outputStream* out) {
+    _offsets.print_on(out);
+  }
 };
 
 class HeapRegion: public G1OffsetTableContigSpace {
@@ -214,12 +232,6 @@
   // True iff the region is in current collection_set.
   bool _in_collection_set;
 
-    // True iff the region is on the unclean list, waiting to be zero filled.
-  bool _is_on_unclean_list;
-
-  // True iff the region is on the free list, ready for allocation.
-  bool _is_on_free_list;
-
   // Is this or has it been an allocation region in the current collection
   // pause.
   bool _is_gc_alloc_region;
@@ -241,6 +253,13 @@
   // Next region whose cards need cleaning
   HeapRegion* _next_dirty_cards_region;
 
+  // Fields used by the HeapRegionSetBase class and subclasses.
+  HeapRegion* _next;
+#ifdef ASSERT
+  HeapRegionSetBase* _containing_set;
+#endif // ASSERT
+  bool _pending_removal;
+
   // For parallel heapRegion traversal.
   jint _claimed;
 
@@ -292,10 +311,6 @@
     _top_at_conc_mark_count = bot;
   }
 
-  jint _zfs;  // A member of ZeroFillState.  Protected by ZF_lock.
-  Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
-                        // made it so.
-
   void set_young_type(YoungType new_type) {
     //assert(_young_type != new_type, "setting the same type" );
     // TODO: add more assertions here
@@ -349,15 +364,14 @@
     RebuildRSClaimValue   = 5
   };
 
-  // Concurrent refinement requires contiguous heap regions (in which TLABs
-  // might be allocated) to be zero-filled.  Each region therefore has a
-  // zero-fill-state.
-  enum ZeroFillState {
-    NotZeroFilled,
-    ZeroFilling,
-    ZeroFilled,
-    Allocated
-  };
+  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
+    assert(is_young(), "we can only skip BOT updates on young regions");
+    return ContiguousSpace::par_allocate(word_size);
+  }
+  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
+    assert(is_young(), "we can only skip BOT updates on young regions");
+    return ContiguousSpace::allocate(word_size);
+  }
 
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
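
The *_no_bot_updates allocators added above skip the BOT write and fall through to ContiguousSpace's bump-pointer allocation; the asserts restrict this shortcut to young regions, whose contents G1 does not look up through the BOT. For the parallel flavor that allocation is, in essence, a CAS loop on top. A minimal sketch of that shape, under assumed simplified types (ToySpace is not the real ContiguousSpace):

#include <atomic>
#include <cstddef>

struct ToySpace {
  char* bottom;
  char* end;
  std::atomic<char*> top;

  // Lock-free bump-pointer allocation; returns NULL when space runs out.
  char* par_allocate(size_t bytes) {
    char* old_top = top.load();
    do {
      if (bytes > (size_t)(end - old_top)) return NULL;  // no room left
      // On failure compare_exchange_weak reloads old_top, so just retry.
    } while (!top.compare_exchange_weak(old_top, old_top + bytes));
    return old_top;   // this thread now owns [old_top, old_top + bytes)
  }
};

int main() {
  static char arena[1024];
  ToySpace s;
  s.bottom = arena;
  s.end = arena + sizeof(arena);
  s.top.store(arena);
  char* p = s.par_allocate(128);   // would race safely with other threads
  return p == NULL;                // 0 on success
}
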
@@ -404,13 +418,38 @@
     return _humongous_start_region;
   }
 
-  // Causes the current region to represent a humongous object spanning "n"
-  // regions.
-  void set_startsHumongous(HeapWord* new_end);
+  // Makes the current region be a "starts humongous" region, i.e.,
+  // the first region in a series of one or more contiguous regions
+  // that will contain a single "humongous" object. The two parameters
+  // are as follows:
+  //
+  // new_top : The new value of the top field of this region which
+  // points to the end of the humongous object that's being
+  // allocated. If there is more than one region in the series, top
+  // will lie beyond this region's original end field and on the last
+  // region in the series.
+  //
+  // new_end : The new value of the end field of this region which
+  // points to the end of the last region in the series. If there is
+  // one region in the series (namely: this one) end will be the same
+  // as the original end of this region.
+  //
+  // Updating top and end as described above makes this region look as
+  // if it spans the entire space taken up by all the regions in the
+  // series and a single allocation moved its top to new_top. This
+  // ensures that the space (capacity / allocated) taken up by all
+  // humongous regions can be calculated by just looking at the
+  // "starts humongous" regions and by ignoring the "continues
+  // humongous" regions.
+  void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
 
-  // The regions that continue a humongous sequence should be added using
-  // this method, in increasing address order.
-  void set_continuesHumongous(HeapRegion* start);
+  // Makes the current region be a "continues humongous'
+  // region. first_hr is the "start humongous" region of the series
+  // which this region will be part of.
+  void set_continuesHumongous(HeapRegion* first_hr);
+
+  // Unsets the humongous-related fields on the region.
+  void set_notHumongous();
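
A small worked example makes the comment above concrete. All sizes are hypothetical, not G1's real defaults; the point is where new_top and new_end land for an object that spans three regions.

#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical sizes, not G1's real defaults.
  const size_t region_words = 128 * 1024;   // capacity of one region
  const size_t word_size    = 300 * 1024;   // the humongous allocation
  const size_t num_regions  = (word_size + region_words - 1) / region_words;

  const size_t bottom  = 0;                 // first region's bottom
  const size_t new_end = bottom + num_regions * region_words;  // 393216
  const size_t new_top = bottom + word_size;                   // 307200

  // new_top lies past the first region's original end (131072) and
  // inside the last (third) region, exactly as the comment describes.
  std::printf("regions=%zu new_end=%zu new_top=%zu\n",
              num_regions, new_end, new_top);
  return 0;
}
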
 
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
@@ -458,45 +497,56 @@
     _next_in_special_set = r;
   }
 
-  bool is_on_free_list() {
-    return _is_on_free_list;
-  }
+  // Methods used by the HeapRegionSetBase class and subclasses.
 
-  void set_on_free_list(bool b) {
-    _is_on_free_list = b;
-  }
+  // Getter and setter for the next field used to link regions into
+  // linked lists.
+  HeapRegion* next()              { return _next; }
+
+  void set_next(HeapRegion* next) { _next = next; }
 
-  HeapRegion* next_from_free_list() {
-    assert(is_on_free_list(),
-           "Should only invoke on free space.");
-    assert(_next_in_special_set == NULL ||
-           _next_in_special_set->is_on_free_list(),
-           "Malformed Free List.");
-    return _next_in_special_set;
-  }
+  // Every region added to a set is tagged with a reference to that
+  // set. This is used for doing consistency checking to make sure that
+  // the contents of a set are as they should be and it's only
+  // available in non-product builds.
+#ifdef ASSERT
+  void set_containing_set(HeapRegionSetBase* containing_set) {
+    assert((containing_set == NULL && _containing_set != NULL) ||
+           (containing_set != NULL && _containing_set == NULL),
+           err_msg("containing_set: "PTR_FORMAT" "
+                   "_containing_set: "PTR_FORMAT,
+                   containing_set, _containing_set));
+
+    _containing_set = containing_set;
+  }
 
-  void set_next_on_free_list(HeapRegion* r) {
-    assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
-    _next_in_special_set = r;
-  }
+  HeapRegionSetBase* containing_set() { return _containing_set; }
+#else // ASSERT
+  void set_containing_set(HeapRegionSetBase* containing_set) { }
 
-  bool is_on_unclean_list() {
-    return _is_on_unclean_list;
-  }
+  // containing_set() is only used in asserts so there's no reason
+  // to provide a dummy version of it.
+#endif // ASSERT
 
-  void set_on_unclean_list(bool b);
+  // If we want to remove regions from a list in bulk we can simply tag
+  // them with the pending_removal tag and call the
+  // remove_all_pending() method on the list.
 
-  HeapRegion* next_from_unclean_list() {
-    assert(is_on_unclean_list(),
-           "Should only invoke on unclean space.");
-    assert(_next_in_special_set == NULL ||
-           _next_in_special_set->is_on_unclean_list(),
-           "Malformed unclean List.");
-    return _next_in_special_set;
+  bool pending_removal() { return _pending_removal; }
+
+  void set_pending_removal(bool pending_removal) {
+    // We can only set pending_removal to true, if it's false and the
+    // region belongs to a set.
+    assert(!pending_removal ||
+           (!_pending_removal && containing_set() != NULL), "pre-condition");
+    // We can only set pending_removal to false, if it's true and the
+    // region does not belong to a set.
+    assert( pending_removal ||
+           ( _pending_removal && containing_set() == NULL), "pre-condition");
+
+    _pending_removal = pending_removal;
   }
 
-  void set_next_on_unclean_list(HeapRegion* r);
-
   HeapRegion* get_next_young_region() { return _next_young_region; }
   void set_next_young_region(HeapRegion* hr) {
     _next_young_region = hr;
@@ -515,11 +565,6 @@
 
   void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 
-  // Ensure that "this" is zero-filled.
-  void ensure_zero_filled();
-  // This one requires that the calling thread holds ZF_mon.
-  void ensure_zero_filled_locked();
-
   // Get the start of the unmarked area in this region.
   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
@@ -754,36 +799,6 @@
   // "end" of the region if there is no such block.
   HeapWord* next_block_start_careful(HeapWord* addr);
 
-  // Returns the zero-fill-state of the current region.
-  ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
-  bool zero_fill_is_allocated() { return _zfs == Allocated; }
-  Thread* zero_filler() { return _zero_filler; }
-
-  // Indicate that the contents of the region are unknown, and therefore
-  // might require zero-filling.
-  void set_zero_fill_needed() {
-    set_zero_fill_state_work(NotZeroFilled);
-  }
-  void set_zero_fill_in_progress(Thread* t) {
-    set_zero_fill_state_work(ZeroFilling);
-    _zero_filler = t;
-  }
-  void set_zero_fill_complete();
-  void set_zero_fill_allocated() {
-    set_zero_fill_state_work(Allocated);
-  }
-
-  void set_zero_fill_state_work(ZeroFillState zfs);
-
-  // This is called when a full collection shrinks the heap.
-  // We want to set the heap region to a value which says
-  // it is no longer part of the heap.  For now, we'll let "NotZF" fill
-  // that role.
-  void reset_zero_fill() {
-    set_zero_fill_state_work(NotZeroFilled);
-    _zero_filler = NULL;
-  }
-
   size_t recorded_rs_length() const        { return _recorded_rs_length; }
   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
@@ -822,10 +837,6 @@
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty) const;
-
-#ifdef DEBUG
-  HeapWord* allocate(size_t size);
-#endif
 };
 
 // HeapRegionClosure is used for iterating over regions.
@@ -848,113 +859,6 @@
   bool complete() { return _complete; }
 };
 
-// A linked lists of heap regions.  It leaves the "next" field
-// unspecified; that's up to subtypes.
-class RegionList VALUE_OBJ_CLASS_SPEC {
-protected:
-  virtual HeapRegion* get_next(HeapRegion* chr) = 0;
-  virtual void set_next(HeapRegion* chr,
-                        HeapRegion* new_next) = 0;
-
-  HeapRegion* _hd;
-  HeapRegion* _tl;
-  size_t _sz;
-
-  // Protected constructor because this type is only meaningful
-  // when the _get/_set next functions are defined.
-  RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
-public:
-  void reset() {
-    _hd = NULL;
-    _tl = NULL;
-    _sz = 0;
-  }
-  HeapRegion* hd() { return _hd; }
-  HeapRegion* tl() { return _tl; }
-  size_t sz() { return _sz; }
-  size_t length();
-
-  bool well_formed() {
-    return
-      ((hd() == NULL && tl() == NULL && sz() == 0)
-       || (hd() != NULL && tl() != NULL && sz() > 0))
-      && (sz() == length());
-  }
-  virtual void insert_before_head(HeapRegion* r);
-  void prepend_list(RegionList* new_list);
-  virtual HeapRegion* pop();
-  void dec_sz() { _sz--; }
-  // Requires that "r" is an element of the list, and is not the tail.
-  void delete_after(HeapRegion* r);
-};
-
-class EmptyNonHRegionList: public RegionList {
-protected:
-  // Protected constructor because this type is only meaningful
-  // when the _get/_set next functions are defined.
-  EmptyNonHRegionList() : RegionList() {}
-
-public:
-  void insert_before_head(HeapRegion* r) {
-    //    assert(r->is_empty(), "Better be empty");
-    assert(!r->isHumongous(), "Better not be humongous.");
-    RegionList::insert_before_head(r);
-  }
-  void prepend_list(EmptyNonHRegionList* new_list) {
-    //    assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
-    //     "Better be empty");
-    assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
-           "Better not be humongous.");
-    //    assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
-    //     "Better be empty");
-    assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
-           "Better not be humongous.");
-    RegionList::prepend_list(new_list);
-  }
-};
-
-class UncleanRegionList: public EmptyNonHRegionList {
-public:
-  HeapRegion* get_next(HeapRegion* hr) {
-    return hr->next_from_unclean_list();
-  }
-  void set_next(HeapRegion* hr, HeapRegion* new_next) {
-    hr->set_next_on_unclean_list(new_next);
-  }
-
-  UncleanRegionList() : EmptyNonHRegionList() {}
-
-  void insert_before_head(HeapRegion* r) {
-    assert(!r->is_on_free_list(),
-           "Better not already be on free list");
-    assert(!r->is_on_unclean_list(),
-           "Better not already be on unclean list");
-    r->set_zero_fill_needed();
-    r->set_on_unclean_list(true);
-    EmptyNonHRegionList::insert_before_head(r);
-  }
-  void prepend_list(UncleanRegionList* new_list) {
-    assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
-           "Better not already be on free list");
-    assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
-           "Better already be marked as on unclean list");
-    assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
-           "Better not already be on free list");
-    assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
-           "Better already be marked as on unclean list");
-    EmptyNonHRegionList::prepend_list(new_list);
-  }
-  HeapRegion* pop() {
-    HeapRegion* res = RegionList::pop();
-    if (res != NULL) res->set_on_unclean_list(false);
-    return res;
-  }
-};
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***
-
 #endif // SERIALGC
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,152 +65,6 @@
 
 // Private methods.
 
-HeapWord*
-HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
-  assert(G1CollectedHeap::isHumongous(word_size),
-         "Allocation size should be humongous");
-  int cur = ind;
-  int first = cur;
-  size_t sumSizes = 0;
-  while (cur < _regions.length() && sumSizes < word_size) {
-    // Loop invariant:
-    //  For all i in [first, cur):
-    //       _regions.at(i)->is_empty()
-    //    && _regions.at(i) is contiguous with its predecessor, if any
-    //  && sumSizes is the sum of the sizes of the regions in the interval
-    //       [first, cur)
-    HeapRegion* curhr = _regions.at(cur);
-    if (curhr->is_empty()
-        && (first == cur
-            || (_regions.at(cur-1)->end() ==
-                curhr->bottom()))) {
-      sumSizes += curhr->capacity() / HeapWordSize;
-    } else {
-      first = cur + 1;
-      sumSizes = 0;
-    }
-    cur++;
-  }
-  if (sumSizes >= word_size) {
-    _alloc_search_start = cur;
-
-    // We need to initialize the region(s) we just discovered. This is
-    // a bit tricky given that it can happen concurrently with
-    // refinement threads refining cards on these regions and
-    // potentially wanting to refine the BOT as they are scanning
-    // those cards (this can happen shortly after a cleanup; see CR
-    // 6991377). So we have to set up the region(s) carefully and in
-    // a specific order.
-
-    // Currently, allocs_are_zero_filled() returns false. The zero
-    // filling infrastructure will be going away soon (see CR 6977804).
-    // So no need to do anything else here.
-    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
-    assert(!zf, "not supported");
-
-    // This will be the "starts humongous" region.
-    HeapRegion* first_hr = _regions.at(first);
-    {
-      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-      first_hr->set_zero_fill_allocated();
-    }
-    // The header of the new object will be placed at the bottom of
-    // the first region.
-    HeapWord* new_obj = first_hr->bottom();
-    // This will be the new end of the first region in the series that
-    // should also match the end of the last region in the seriers.
-    // (Note: sumSizes = "region size" x "number of regions we found").
-    HeapWord* new_end = new_obj + sumSizes;
-    // This will be the new top of the first region that will reflect
-    // this allocation.
-    HeapWord* new_top = new_obj + word_size;
-
-    // First, we need to zero the header of the space that we will be
-    // allocating. When we update top further down, some refinement
-    // threads might try to scan the region. By zeroing the header we
-    // ensure that any thread that will try to scan the region will
-    // come across the zero klass word and bail out.
-    //
-    // NOTE: It would not have been correct to have used
-    // CollectedHeap::fill_with_object() and make the space look like
-    // an int array. The thread that is doing the allocation will
-    // later update the object header to a potentially different array
-    // type and, for a very short period of time, the klass and length
-    // fields will be inconsistent. This could cause a refinement
-    // thread to calculate the object size incorrectly.
-    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
-
-    // We will set up the first region as "starts humongous". This
-    // will also update the BOT covering all the regions to reflect
-    // that there is a single object that starts at the bottom of the
-    // first region.
-    first_hr->set_startsHumongous(new_end);
-
-    // Then, if there are any, we will set up the "continues
-    // humongous" regions.
-    HeapRegion* hr = NULL;
-    for (int i = first + 1; i < cur; ++i) {
-      hr = _regions.at(i);
-      {
-        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-        hr->set_zero_fill_allocated();
-      }
-      hr->set_continuesHumongous(first_hr);
-    }
-    // If we have "continues humongous" regions (hr != NULL), then the
-    // end of the last one should match new_end.
-    assert(hr == NULL || hr->end() == new_end, "sanity");
-
-    // Up to this point no concurrent thread would have been able to
-    // do any scanning on any region in this series. All the top
-    // fields still point to bottom, so the intersection between
-    // [bottom,top] and [card_start,card_end] will be empty. Before we
-    // update the top fields, we'll do a storestore to make sure that
-    // no thread sees the update to top before the zeroing of the
-    // object header and the BOT initialization.
-    OrderAccess::storestore();
-
-    // Now that the BOT and the object header have been initialized,
-    // we can update top of the "starts humongous" region.
-    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
-           "new_top should be in this region");
-    first_hr->set_top(new_top);
-
-    // Now, we will update the top fields of the "continues humongous"
-    // regions. The reason we need to do this is that, otherwise,
-    // these regions would look empty and this will confuse parts of
-    // G1. For example, the code that looks for a consecutive number
-    // of empty regions will consider them empty and try to
-    // re-allocate them. We can extend is_empty() to also include
-    // !continuesHumongous(), but it is easier to just update the top
-    // fields here.
-    hr = NULL;
-    for (int i = first + 1; i < cur; ++i) {
-      hr = _regions.at(i);
-      if ((i + 1) == cur) {
-        // last continues humongous region
-        assert(hr->bottom() < new_top && new_top <= hr->end(),
-               "new_top should fall on this region");
-        hr->set_top(new_top);
-      } else {
-        // not last one
-        assert(new_top > hr->end(), "new_top should be above this region");
-        hr->set_top(hr->end());
-      }
-    }
-    // If we have continues humongous regions (hr != NULL), then the
-    // end of the last one should match new_end and its top should
-    // match new_top.
-    assert(hr == NULL ||
-           (hr->end() == new_end && hr->top() == new_top), "sanity");
-
-    return new_obj;
-  } else {
-    // If we started from the beginning, we want to know why we can't alloc.
-    return NULL;
-  }
-}
-
 void HeapRegionSeq::print_empty_runs() {
   int empty_run = 0;
   int n_empty = 0;
@@ -284,13 +138,67 @@
   return res;
 }
 
-HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
-  int cur = _alloc_search_start;
-  // Make sure "cur" is a valid index.
-  assert(cur >= 0, "Invariant.");
-  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
-  if (res == NULL)
-    res = alloc_obj_from_region_index(0, word_size);
+int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
+  assert(num > 1, "pre-condition");
+  assert(0 <= from && from <= _regions.length(),
+         err_msg("from: %d should be valid and <= than %d",
+                 from, _regions.length()));
+
+  int curr = from;
+  int first = -1;
+  size_t num_so_far = 0;
+  while (curr < _regions.length() && num_so_far < num) {
+    HeapRegion* curr_hr = _regions.at(curr);
+    if (curr_hr->is_empty()) {
+      if (first == -1) {
+        first = curr;
+        num_so_far = 1;
+      } else {
+        num_so_far += 1;
+      }
+    } else {
+      first = -1;
+      num_so_far = 0;
+    }
+    curr += 1;
+  }
+
+  assert(num_so_far <= num, "post-condition");
+  if (num_so_far == num) {
+    // we found enough space for the humongous object
+    assert(from <= first && first < _regions.length(), "post-condition");
+    assert(first < curr && (curr - first) == (int) num, "post-condition");
+    for (int i = first; i < first + (int) num; ++i) {
+      assert(_regions.at(i)->is_empty(), "post-condition");
+    }
+    return first;
+  } else {
+    // we failed to find enough space for the humongous object
+    return -1;
+  }
+}
+
+int HeapRegionSeq::find_contiguous(size_t num) {
+  assert(num > 1, "otherwise we should not be calling this");
+  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
+         err_msg("_alloc_search_start: %d should be valid and <= than %d",
+                 _alloc_search_start, _regions.length()));
+
+  int start = _alloc_search_start;
+  int res = find_contiguous_from(start, num);
+  if (res == -1 && start != 0) {
+    // Try starting from the beginning. If _alloc_search_start was 0,
+    // no point in doing this again.
+    res = find_contiguous_from(0, num);
+  }
+  if (res != -1) {
+    assert(0 <= res && res < _regions.length(),
+           err_msg("res: %d should be valid", res));
+    _alloc_search_start = res + (int) num;
+  }
+  assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
+         err_msg("_alloc_search_start: %d should be valid",
+                 _alloc_search_start));
   return res;
 }
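
find_contiguous_from() is a single linear scan that resets its run counter at every non-empty region, and find_contiguous() wraps it with one retry from index 0 when the search from the cached hint fails. The same scan, written standalone over a plain occupancy vector (true == empty) with nothing assumed from HotSpot:

#include <cstddef>
#include <vector>

// First index of num consecutive empty regions in [from, size), else -1.
int find_contiguous_from(const std::vector<bool>& empty, int from, size_t num) {
  int first = -1;
  size_t num_so_far = 0;
  for (int curr = from; curr < (int) empty.size() && num_so_far < num; ++curr) {
    if (empty[curr]) {
      if (first == -1) { first = curr; num_so_far = 1; }
      else             { num_so_far += 1; }
    } else {
      first = -1;        // run broken, start over at the next region
      num_so_far = 0;
    }
  }
  return num_so_far == num ? first : -1;
}

// Search from a cached hint first, then retry once from 0, as above.
int find_contiguous(const std::vector<bool>& empty, int hint, size_t num) {
  int res = find_contiguous_from(empty, hint, num);
  if (res == -1 && hint != 0) res = find_contiguous_from(empty, 0, num);
  return res;
}

int main() {
  bool occ[] = { true, false, true, true, false, true, true, true };
  std::vector<bool> empty(occ, occ + 8);
  return find_contiguous(empty, /* hint */ 4, /* num */ 3) == 5 ? 0 : 1;
}
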
 
@@ -376,6 +284,10 @@
 
 MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                    size_t& num_regions_deleted) {
+  // Reset this in case it's currently pointing into the regions that
+  // we just removed.
+  _alloc_search_start = 0;
+
   assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
   assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
 
@@ -395,7 +307,6 @@
     }
     assert(cur == _regions.top(), "Should be top");
     if (!cur->is_empty()) break;
-    cur->reset_zero_fill();
     shrink_bytes -= cur->capacity();
     num_regions_deleted++;
     _regions.pop();
@@ -410,7 +321,6 @@
   return MemRegion(last_start, end);
 }
 
-
 class PrintHeapRegionClosure : public  HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,9 +41,9 @@
   // (For efficiency only; private to obj_allocate after initialization.)
   int _alloc_search_start;
 
-  // Attempts to allocate a block of the (assumed humongous) word_size,
-  // starting at the region "ind".
-  HeapWord* alloc_obj_from_region_index(int ind, size_t word_size);
+  // Finds a contiguous set of empty regions of length num, starting
+  // from a given index.
+  int find_contiguous_from(int from, size_t num);
 
   // Currently, we're choosing collection sets in a round-robin fashion,
   // starting here.
@@ -76,11 +76,8 @@
   // that are available for allocation.
   size_t free_suffix();
 
-  // Requires "word_size" to be humongous (in the technical sense).  If
-  // possible, allocates a contiguous subsequence of the heap regions to
-  // satisfy the allocation, and returns the address of the beginning of
-  // that sequence, otherwise returns NULL.
-  HeapWord* obj_allocate(size_t word_size);
+  // Finds a contiguous set of empty regions of length num.
+  int find_contiguous(size_t num);
 
   // Apply the "doHeapRegion" method of "blk" to all regions in "this",
   // in address order, terminating the iteration early
@@ -106,7 +103,7 @@
 
   // If "addr" falls within a region in the sequence, return that region,
   // or else NULL.
-  HeapRegion* addr_to_region(const void* addr);
+  inline HeapRegion* addr_to_region(const void* addr);
 
   void print();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+
+size_t HeapRegionSetBase::_unrealistically_long_length = 0;
+
+//////////////////// HeapRegionSetBase ////////////////////
+
+void HeapRegionSetBase::set_unrealistically_long_length(size_t len) {
+  guarantee(_unrealistically_long_length == 0, "should only be set once");
+  _unrealistically_long_length = len;
+}
+
+size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
+  assert(hr->startsHumongous(), "pre-condition");
+  assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
+  size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes;
+  assert(region_num > 0, "sanity");
+  return region_num;
+}
+
+void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) {
+  msg->append("[%s] %s "
+              "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
+              "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
+              name(), message, length(), region_num(),
+              total_capacity_bytes(), total_used_bytes());
+  fill_in_ext_msg_extra(msg);
+}
+
+bool HeapRegionSetBase::verify_region(HeapRegion* hr,
+                                  HeapRegionSetBase* expected_containing_set) {
+  const char* error_message = NULL;
+
+  if (!regions_humongous()) {
+    if (hr->isHumongous()) {
+      error_message = "the region should not be humongous";
+    }
+  } else {
+    if (!hr->isHumongous() || !hr->startsHumongous()) {
+      error_message = "the region should be 'starts humongous'";
+    }
+  }
+
+  if (!regions_empty()) {
+    if (hr->is_empty()) {
+      error_message = "the region should not be empty";
+    }
+  } else {
+    if (!hr->is_empty()) {
+      error_message = "the region should be empty";
+    }
+  }
+
+#ifdef ASSERT
+  // The _containing_set field is only available when ASSERT is defined.
+  if (hr->containing_set() != expected_containing_set) {
+    error_message = "inconsistent containing set found";
+  }
+#endif // ASSERT
+
+  const char* extra_error_message = verify_region_extra(hr);
+  if (extra_error_message != NULL) {
+    error_message = extra_error_message;
+  }
+
+  if (error_message != NULL) {
+    outputStream* out = tty;
+    out->cr();
+    out->print_cr("## [%s] %s", name(), error_message);
+    out->print_cr("## Offending Region: "PTR_FORMAT, hr);
+    out->print_cr("   "HR_FORMAT, HR_FORMAT_PARAMS(hr));
+#ifdef ASSERT
+    out->print_cr("   containing set: "PTR_FORMAT, hr->containing_set());
+#endif // ASSERT
+    out->print_cr("## Offending Region Set: "PTR_FORMAT, this);
+    print_on(out);
+    return false;
+  } else {
+    return true;
+  }
+}
+
+void HeapRegionSetBase::verify() {
+  // It's important that we also observe the MT safety protocol even
+  // for the verification calls. If we do verification without the
+  // appropriate locks and the set changes underneath our feet
+  // verification might fail and send us on a wild goose chase.
+  hrl_assert_mt_safety_ok(this);
+
+  guarantee(( is_empty() && length() == 0 && region_num() == 0 &&
+              total_used_bytes() == 0 && total_capacity_bytes() == 0) ||
+            (!is_empty() && length() >= 0 && region_num() >= 0 &&
+              total_used_bytes() >= 0 && total_capacity_bytes() >= 0),
+            hrl_ext_msg(this, "invariant"));
+
+  guarantee((!regions_humongous() && region_num() == length()) ||
+            ( regions_humongous() && region_num() >= length()),
+            hrl_ext_msg(this, "invariant"));
+
+  guarantee(!regions_empty() || total_used_bytes() == 0,
+            hrl_ext_msg(this, "invariant"));
+
+  guarantee(total_used_bytes() <= total_capacity_bytes(),
+            hrl_ext_msg(this, "invariant"));
+}
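
The guarantees above boil down to simple arithmetic relations between the set's counters. A tiny self-contained restatement with made-up numbers (the 1 MB grain is hypothetical):

#include <cassert>
#include <cstddef>

int main() {
  // Made-up numbers for a humongous set.
  const size_t grain      = 1024 * 1024;
  const size_t length     = 2;                  // "starts humongous" entries
  const size_t region_num = 5;                  // physical regions spanned
  const size_t used       = 4 * grain + 512;
  const size_t capacity   = region_num * grain; // derived, never stored

  assert(region_num >= length);   // humongous sets: region num >= length
  assert(used <= capacity);       // can never use more than we have
  assert(length > 0 || (region_num == 0 && used == 0)); // empty => all zero
  return 0;
}
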
+
+void HeapRegionSetBase::verify_start() {
+  // See comment in verify() about MT safety and verification.
+  hrl_assert_mt_safety_ok(this);
+  assert(!_verify_in_progress,
+         hrl_ext_msg(this, "verification should not be in progress"));
+
+  // Do the basic verification first before we do the checks over the regions.
+  HeapRegionSetBase::verify();
+
+  _calc_length               = 0;
+  _calc_region_num           = 0;
+  _calc_total_capacity_bytes = 0;
+  _calc_total_used_bytes     = 0;
+  _verify_in_progress        = true;
+}
+
+void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
+  // See comment in verify() about MT safety and verification.
+  hrl_assert_mt_safety_ok(this);
+  assert(_verify_in_progress,
+         hrl_ext_msg(this, "verification should be in progress"));
+
+  guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification"));
+
+  _calc_length               += 1;
+  if (!hr->isHumongous()) {
+    _calc_region_num         += 1;
+  } else {
+    _calc_region_num         += calculate_region_num(hr);
+  }
+  _calc_total_capacity_bytes += hr->capacity();
+  _calc_total_used_bytes     += hr->used();
+}
+
+void HeapRegionSetBase::verify_end() {
+  // See comment in verify() about MT safety and verification.
+  hrl_assert_mt_safety_ok(this);
+  assert(_verify_in_progress,
+         hrl_ext_msg(this, "verification should be in progress"));
+
+  guarantee(length() == _calc_length,
+            hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == "
+                        "calc length: "SIZE_FORMAT,
+                        name(), length(), _calc_length));
+
+  guarantee(region_num() == _calc_region_num,
+            hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
+                        "calc region num: "SIZE_FORMAT,
+                        name(), region_num(), _calc_region_num));
+
+  guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
+            hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
+                        "calc capacity bytes: "SIZE_FORMAT,
+                        name(),
+                        total_capacity_bytes(), _calc_total_capacity_bytes));
+
+  guarantee(total_used_bytes() == _calc_total_used_bytes,
+            hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
+                        "calc used bytes: "SIZE_FORMAT,
+                        name(), total_used_bytes(), _calc_total_used_bytes));
+
+  _verify_in_progress = false;
+}
+
+void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
+  out->cr();
+  out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
+  out->print_cr("  Region Assumptions");
+  out->print_cr("    humongous         : %s", BOOL_TO_STR(regions_humongous()));
+  out->print_cr("    empty             : %s", BOOL_TO_STR(regions_empty()));
+  out->print_cr("  Attributes");
+  out->print_cr("    length            : "SIZE_FORMAT_W(14), length());
+  out->print_cr("    region num        : "SIZE_FORMAT_W(14), region_num());
+  out->print_cr("    total capacity    : "SIZE_FORMAT_W(14)" bytes",
+                total_capacity_bytes());
+  out->print_cr("    total used        : "SIZE_FORMAT_W(14)" bytes",
+                total_used_bytes());
+}
+
+void HeapRegionSetBase::clear() {
+  _length           = 0;
+  _region_num       = 0;
+  _total_used_bytes = 0;
+}
+
+HeapRegionSetBase::HeapRegionSetBase(const char* name)
+  : _name(name), _verify_in_progress(false),
+    _calc_length(0), _calc_region_num(0),
+    _calc_total_capacity_bytes(0), _calc_total_used_bytes(0) { }
+
+//////////////////// HeapRegionSet ////////////////////
+
+void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
+  hrl_assert_mt_safety_ok(this);
+  hrl_assert_mt_safety_ok(proxy_set);
+  hrl_assert_sets_match(this, proxy_set);
+
+  verify_optional();
+  proxy_set->verify_optional();
+
+  if (proxy_set->is_empty()) return;
+
+  assert(proxy_set->length() <= _length,
+         hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
+                     "should be <= length: "SIZE_FORMAT,
+                     name(), proxy_set->length(), _length));
+  _length -= proxy_set->length();
+
+  assert(proxy_set->region_num() <= _region_num,
+         hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
+                     "should be <= region num: "SIZE_FORMAT,
+                     name(), proxy_set->region_num(), _region_num));
+  _region_num -= proxy_set->region_num();
+
+  assert(proxy_set->total_used_bytes() <= _total_used_bytes,
+         hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
+                     "should be <= used bytes: "SIZE_FORMAT,
+                     name(), proxy_set->total_used_bytes(),
+                     _total_used_bytes));
+  _total_used_bytes -= proxy_set->total_used_bytes();
+
+  proxy_set->clear();
+
+  verify_optional();
+  proxy_set->verify_optional();
+}
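
update_from_proxy() drains a proxy set into the main one: the proxy's counters are subtracted (they record amounts to be deducted) and the proxy is then cleared. A toy sketch of the same drain, with plain structs standing in for HeapRegionSet:

#include <cassert>
#include <cstddef>

struct ToySet {
  size_t length, region_num, used_bytes;

  // Deduct the proxy's accumulated amounts and reset it, mirroring
  // HeapRegionSet::update_from_proxy().
  void update_from_proxy(ToySet& proxy) {
    if (proxy.length == 0 && proxy.region_num == 0 && proxy.used_bytes == 0) {
      return;                              // proxy is empty, nothing to do
    }
    assert(proxy.length <= length);
    assert(proxy.region_num <= region_num);
    assert(proxy.used_bytes <= used_bytes);
    length     -= proxy.length;
    region_num -= proxy.region_num;
    used_bytes -= proxy.used_bytes;
    proxy.length = proxy.region_num = proxy.used_bytes = 0;   // clear()
  }
};

int main() {
  ToySet master = { 10, 10, 10000 };
  ToySet proxy  = {  3,  3,  2500 };
  master.update_from_proxy(proxy);
  assert(master.length == 7 && proxy.length == 0);
  return 0;
}
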
+
+//////////////////// HeapRegionLinkedList ////////////////////
+
+void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) {
+  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
+}
+
+void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
+  hrl_assert_mt_safety_ok(this);
+  hrl_assert_mt_safety_ok(from_list);
+
+  verify_optional();
+  from_list->verify_optional();
+
+  if (from_list->is_empty()) return;
+
+#ifdef ASSERT
+  HeapRegionLinkedListIterator iter(from_list);
+  while (iter.more_available()) {
+    HeapRegion* hr = iter.get_next();
+    // In set_containing_set() we check that we either set the value
+    // from NULL to non-NULL or vice versa to catch bugs. So, we have
+    // to NULL it first before setting it to the value.
+    hr->set_containing_set(NULL);
+    hr->set_containing_set(this);
+  }
+#endif // ASSERT
+
+  if (_tail != NULL) {
+    assert(length() >  0 && _head != NULL, hrl_ext_msg(this, "invariant"));
+    _tail->set_next(from_list->_head);
+  } else {
+    assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant"));
+    _head = from_list->_head;
+  }
+  _tail = from_list->_tail;
+
+  _length           += from_list->length();
+  _region_num       += from_list->region_num();
+  _total_used_bytes += from_list->total_used_bytes();
+  from_list->clear();
+
+  verify_optional();
+  from_list->verify_optional();
+}
+
+void HeapRegionLinkedList::remove_all() {
+  hrl_assert_mt_safety_ok(this);
+  verify_optional();
+
+  HeapRegion* curr = _head;
+  while (curr != NULL) {
+    hrl_assert_region_ok(this, curr, this);
+
+    HeapRegion* next = curr->next();
+    curr->set_next(NULL);
+    curr->set_containing_set(NULL);
+    curr = next;
+  }
+  clear();
+
+  verify_optional();
+}
+
+void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
+  hrl_assert_mt_safety_ok(this);
+  assert(target_count > 1, hrl_ext_msg(this, "pre-condition"));
+  assert(!is_empty(), hrl_ext_msg(this, "pre-condition"));
+
+  verify_optional();
+  DEBUG_ONLY(size_t old_length = length();)
+
+  HeapRegion* curr = _head;
+  HeapRegion* prev = NULL;
+  size_t count = 0;
+  while (curr != NULL) {
+    hrl_assert_region_ok(this, curr, this);
+    HeapRegion* next = curr->next();
+
+    if (curr->pending_removal()) {
+      assert(count < target_count,
+             hrl_err_msg("[%s] should not come across more regions "
+                         "pending for removal than target_count: "SIZE_FORMAT,
+                         name(), target_count));
+
+      if (prev == NULL) {
+        assert(_head == curr, hrl_ext_msg(this, "invariant"));
+        _head = next;
+      } else {
+        assert(_head != curr, hrl_ext_msg(this, "invariant"));
+        prev->set_next(next);
+      }
+      if (next == NULL) {
+        assert(_tail == curr, hrl_ext_msg(this, "invariant"));
+        _tail = prev;
+      } else {
+        assert(_tail != curr, hrl_ext_msg(this, "invariant"));
+      }
+
+      curr->set_next(NULL);
+      remove_internal(curr);
+      curr->set_pending_removal(false);
+
+      count += 1;
+
+      // If we have come across the target number of regions we can
+      // just bail out. However, for debugging purposes, we can
+      // carry on iterating to make sure there are no more regions
+      // tagged with pending removal.
+      DEBUG_ONLY(if (count == target_count) break;)
+    } else {
+      prev = curr;
+    }
+    curr = next;
+  }
+
+  assert(count == target_count,
+         hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == "
+                     "target_count: "SIZE_FORMAT, name(), count, target_count));
+  assert(length() + target_count == old_length,
+         hrl_err_msg("[%s] new length should be consistent "
+                     "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
+                     "target_count: "SIZE_FORMAT,
+                     name(), length(), old_length, target_count));
+
+  verify_optional();
+}
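
remove_all_pending() is one forward pass with a trailing prev pointer: every tagged region is unlinked, head and tail are repaired at the boundaries, and the count is checked against target_count. The same surgery as a self-contained toy (Node and ToyList are simplified stand-ins; the real code also detaches each region from its containing set):

#include <cassert>
#include <cstddef>

struct Node {
  Node* next;
  bool  pending_removal;
};

struct ToyList {
  Node*  head;
  Node*  tail;
  size_t length;

  void remove_all_pending(size_t target_count) {
    Node*  prev  = NULL;
    size_t count = 0;
    for (Node* curr = head; curr != NULL; ) {
      Node* next = curr->next;
      if (curr->pending_removal) {
        if (prev == NULL) head = next; else prev->next = next;
        if (next == NULL) tail = prev;   // removed the old tail
        curr->next = NULL;
        curr->pending_removal = false;
        length -= 1;
        count  += 1;
      } else {
        prev = curr;                     // only advances past kept nodes
      }
      curr = next;
    }
    assert(count == target_count);
  }
};

int main() {
  Node n[4] = {};                        // zero-init: next=NULL, flag=false
  for (int i = 0; i < 3; ++i) n[i].next = &n[i + 1];
  ToyList list = { &n[0], &n[3], 4 };
  n[0].pending_removal = n[2].pending_removal = true;
  list.remove_all_pending(2);
  assert(list.head == &n[1] && list.tail == &n[3] && list.length == 2);
  return 0;
}
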
+
+void HeapRegionLinkedList::verify() {
+  // See comment in HeapRegionSetBase::verify() about MT safety and
+  // verification.
+  hrl_assert_mt_safety_ok(this);
+
+  // This will also do the basic verification too.
+  verify_start();
+
+  HeapRegion* curr  = _head;
+  HeapRegion* prev1 = NULL;
+  HeapRegion* prev0 = NULL;
+  size_t      count = 0;
+  while (curr != NULL) {
+    verify_next_region(curr);
+
+    count += 1;
+    guarantee(count < _unrealistically_long_length,
+              hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
+                          "seems very long, is there maybe a cycle? "
+                          "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
+                          "prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
+                          name(), count, curr, prev0, prev1, length()));
+
+    prev1 = prev0;
+    prev0 = curr;
+    curr  = curr->next();
+  }
+
+  guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition"));
+
+  verify_end();
+}
+
+void HeapRegionLinkedList::clear() {
+  HeapRegionSetBase::clear();
+  _head = NULL;
+  _tail = NULL;
+}
+
+void HeapRegionLinkedList::print_on(outputStream* out, bool print_contents) {
+  HeapRegionSetBase::print_on(out, print_contents);
+  out->print_cr("  Linking");
+  out->print_cr("    head              : "PTR_FORMAT, _head);
+  out->print_cr("    tail              : "PTR_FORMAT, _tail);
+
+  if (print_contents) {
+    out->print_cr("  Contents");
+    HeapRegionLinkedListIterator iter(this);
+    while (iter.more_available()) {
+      HeapRegion* hr = iter.get_next();
+      hr->print_on(out);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
+
+#include "gc_implementation/g1/heapRegion.hpp"
+
+// Large buffer for some cases where the output might be larger than normal.
+#define HRL_ERR_MSG_BUFSZ 512
+typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
+
+// Set verification will be forced either if someone defines
+// HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which
+// asserts are compiled in.
+#ifndef HEAP_REGION_SET_FORCE_VERIFY
+#define HEAP_REGION_SET_FORCE_VERIFY defined(ASSERT)
+#endif // HEAP_REGION_SET_FORCE_VERIFY
+
+//////////////////// HeapRegionSetBase ////////////////////
+
+// Base class for all the classes that represent heap region sets. It
+// contains the basic attributes that each set needs to maintain
+// (e.g., length, region num, used bytes sum) plus any shared
+// functionality (e.g., verification).
+
+class hrl_ext_msg;
+
+class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
+  friend class hrl_ext_msg;
+
+protected:
+  static size_t calculate_region_num(HeapRegion* hr);
+
+  static size_t _unrealistically_long_length;
+
+  // The number of regions added to the set. If the set contains
+  // only humongous regions, this reflects only 'starts humongous'
+  // regions and does not include 'continues humongous' ones.
+  size_t _length;
+
+  // The total number of regions represented by the set. If the set
+  // does not contain humongous regions, this should be the same as
+  // _length. If the set contains only humongous regions, this will
+  // include the 'continues humongous' regions.
+  size_t _region_num;
+
+  // We don't keep track of the total capacity explicitly, we instead
+  // recalculate it based on _region_num and the heap region size.
+
+  // The sum of used bytes of all the regions in the set.
+  size_t _total_used_bytes;
+
+  const char* _name;
+
+  bool        _verify_in_progress;
+  size_t      _calc_length;
+  size_t      _calc_region_num;
+  size_t      _calc_total_capacity_bytes;
+  size_t      _calc_total_used_bytes;
+
+  // verify_region() is used to ensure that the contents of a region
+  // added to / removed from a set are consistent. Different sets
+  // make different assumptions about the regions added to them. So
+  // each set can override verify_region_extra(), which is called
+  // from verify_region(), and do any extra verification it needs to
+  // perform there.
+  virtual const char* verify_region_extra(HeapRegion* hr) { return NULL; }
+  bool verify_region(HeapRegion* hr,
+                     HeapRegionSetBase* expected_containing_set);
+
+  // Indicates whether all regions in the set should be humongous or
+  // not. Only used during verification.
+  virtual bool regions_humongous() = 0;
+
+  // Indicates whether all regions in the set should be empty or
+  // not. Only used during verification.
+  virtual bool regions_empty() = 0;
+
+  // Subclasses can optionally override this to do MT safety protocol
+  // checks. It is called in an assert from all methods that perform
+  // updates on the set (and subclasses should also call it too).
+  virtual bool check_mt_safety() { return true; }
+
+  // fill_in_ext_msg() writes the values of the set's attributes
+  // in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra()
+  // allows subclasses to append further information.
+  virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { }
+  void fill_in_ext_msg(hrl_ext_msg* msg, const char* message);
+
+  // It updates the fields of the set to reflect hr being added to
+  // the set.
+  inline void update_for_addition(HeapRegion* hr);
+
+  // It updates the fields of the set to reflect hr being added to
+  // the set and tags the region appropriately.
+  inline void add_internal(HeapRegion* hr);
+
+  // It updates the fields of the set to reflect hr being removed
+  // from the set.
+  inline void update_for_removal(HeapRegion* hr);
+
+  // It updates the fields of the set to reflect hr being removed
+  // from the set and tags the region appropriately.
+  inline void remove_internal(HeapRegion* hr);
+
+  // It clears all the fields of the set. Note: it will not iterate
+  // over the set and remove regions from it. It assumes that the
+  // caller has already done so. It will literally just clear the fields.
+  virtual void clear();
+
+  HeapRegionSetBase(const char* name);
+
+public:
+  static void set_unrealistically_long_length(size_t len);
+
+  const char* name() { return _name; }
+
+  size_t length() { return _length; }
+
+  bool is_empty() { return _length == 0; }
+
+  size_t region_num() { return _region_num; }
+
+  size_t total_capacity_bytes() {
+    return region_num() << HeapRegion::LogOfHRGrainBytes;
+  }
+
+  size_t total_used_bytes() { return _total_used_bytes; }
+
+  virtual void verify();
+  void verify_start();
+  void verify_next_region(HeapRegion* hr);
+  void verify_end();
+
+#if HEAP_REGION_SET_FORCE_VERIFY
+  void verify_optional() {
+    verify();
+  }
+#else // HEAP_REGION_SET_FORCE_VERIFY
+  void verify_optional() { }
+#endif // HEAP_REGION_SET_FORCE_VERIFY
+
+  virtual void print_on(outputStream* out, bool print_contents = false);
+};
+
+// Customized err_msg for heap region sets. Apart from a
+// assert/guarantee-specific message it also prints out the values of
+// the fields of the associated set. This can be very helpful in
+// diagnosing failures.
+
+class hrl_ext_msg : public hrl_err_msg {
+public:
+  hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") {
+    set->fill_in_ext_msg(this, message);
+  }
+};
+
+// These two macros are provided for convenience, to keep the uses of
+// these two asserts a bit more concise.
+
+#define hrl_assert_mt_safety_ok(_set_)                                        \
+  do {                                                                        \
+    assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety"));    \
+  } while (0)
+
+#define hrl_assert_region_ok(_set_, _hr_, _expected_)                         \
+  do {                                                                        \
+    assert((_set_)->verify_region((_hr_), (_expected_)),                      \
+           hrl_ext_msg((_set_), "region verification"));                      \
+  } while (0)
+
+//////////////////// HeapRegionSet ////////////////////
+
+#define hrl_assert_sets_match(_set1_, _set2_)                                 \
+  do {                                                                        \
+    assert(((_set1_)->regions_humongous() ==                                  \
+                                            (_set2_)->regions_humongous()) && \
+           ((_set1_)->regions_empty() == (_set2_)->regions_empty()),          \
+           hrl_err_msg("the contents of set %s and set %s should match",      \
+                       (_set1_)->name(), (_set2_)->name()));                  \
+  } while (0)
+
+// This class represents heap region sets whose members are not
+// explicitly tracked. It's helpful to group regions using such sets
+// so that we can reason about all the region groups in the heap using
+// the same interface (namely, the HeapRegionSetBase API).
+
+class HeapRegionSet : public HeapRegionSetBase {
+protected:
+  virtual const char* verify_region_extra(HeapRegion* hr) {
+    if (hr->next() != NULL) {
+      return "next() should always be NULL as we do not link the regions";
+    }
+
+    return HeapRegionSetBase::verify_region_extra(hr);
+  }
+
+  HeapRegionSet(const char* name) : HeapRegionSetBase(name) {
+    clear();
+  }
+
+public:
+  // It adds hr to the set. The region should not be a member of
+  // another set.
+  inline void add(HeapRegion* hr);
+
+  // It removes hr from the set. The region should be a member of
+  // this set.
+  inline void remove(HeapRegion* hr);
+
+  // It removes a region from the set. Instead of updating the fields
+  // of the set to reflect this removal, it accumulates the updates
+  // in proxy_set. The idea is that proxy_set is thread-local to
+  // avoid multiple threads updating the fields of the set
+  // concurrently and having to synchronize. The method
+  // update_from_proxy() will update the fields of the set from the
+  // proxy_set.
+  inline void remove_with_proxy(HeapRegion* hr, HeapRegionSet* proxy_set);
+
+  // After multiple calls to remove_with_proxy() the updates to the
+  // fields of the set are accumulated in proxy_set. This call
+  // updates the fields of the set from proxy_set.
+  void update_from_proxy(HeapRegionSet* proxy_set);
+};
+
+//////////////////// HeapRegionLinkedList ////////////////////
+
+// A set that links all the regions added to it in a singly-linked
+// list. We should try to avoid doing operations that iterate over
+// such lists in performance critical paths. Typically we should
+// add / remove one region at a time or concatenate two lists. All
+// those operations are done in constant time.
+
+class HeapRegionLinkedListIterator;
+
+class HeapRegionLinkedList : public HeapRegionSetBase {
+  friend class HeapRegionLinkedListIterator;
+
+private:
+  HeapRegion* _head;
+  HeapRegion* _tail;
+
+  // These are provided for use by the friend classes.
+  HeapRegion* head() { return _head; }
+  HeapRegion* tail() { return _tail; }
+
+protected:
+  virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg);
+
+  // See the comment for HeapRegionSetBase::clear()
+  virtual void clear();
+
+  HeapRegionLinkedList(const char* name) : HeapRegionSetBase(name) {
+    clear();
+  }
+
+public:
+  // It adds hr to the list as the new tail. The region should not be
+  // a member of another set.
+  inline void add_as_tail(HeapRegion* hr);
+
+  // It removes and returns the head of the list. It assumes that the
+  // list is not empty so it will return a non-NULL value.
+  inline HeapRegion* remove_head();
+
+  // Convenience method.
+  inline HeapRegion* remove_head_or_null();
+
+  // It moves the regions from from_list to this list and empties
+  // from_list. The new regions will appear in the same order as they
+  // were in from_list and be linked at the end of this list.
+  void add_as_tail(HeapRegionLinkedList* from_list);
+
+  // It empties the list by removing all regions from it.
+  void remove_all();
+
+  // It removes all regions in the list that are pending for removal
+  // (i.e., they have been tagged with "pending_removal"). The list
+  // must not be empty, target_count should reflect the exact number
+  // of regions that are pending for removal in the list, and
+  // target_count should be > 1 (currently, we never need to remove a
+  // single region using this).
+  void remove_all_pending(size_t target_count);
+
+  virtual void verify();
+
+  virtual void print_on(outputStream* out, bool print_contents = false);
+};
+
+//////////////////// HeapRegionLinkedListIterator ////////////////////
+
+// Iterator class that provides a convenient way to iterate over the
+// regions in a HeapRegionLinkedList instance.
+
+class HeapRegionLinkedListIterator : public StackObj {
+private:
+  HeapRegionLinkedList* _list;
+  HeapRegion*           _curr;
+
+public:
+  bool more_available() {
+    return _curr != NULL;
+  }
+
+  HeapRegion* get_next() {
+    assert(more_available(),
+           "get_next() should be called when more regions are available");
+
+    // If we ever introduce a count in this iterator, we should also
+    // do the "cycle" check here.
+
+    HeapRegion* hr = _curr;
+    assert(_list->verify_region(hr, _list), "region verification");
+    _curr = hr->next();
+    return hr;
+  }
+
+  HeapRegionLinkedListIterator(HeapRegionLinkedList* list)
+    : _curr(NULL), _list(list) {
+    _curr = list->head();
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
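
A minimal standalone sketch of the iterator protocol above (all names here are
illustrative stand-ins, not HotSpot code): callers must check more_available()
before every get_next(), exactly as HeapRegionLinkedListIterator requires.

#include <cassert>
#include <cstdio>

struct Region {
  Region* next;
  int     id;
};

struct ListIter {
  Region* _curr;
  explicit ListIter(Region* head) : _curr(head) {}
  bool more_available() const { return _curr != NULL; }
  Region* get_next() {
    assert(more_available() && "get_next() needs an available region");
    Region* r = _curr;
    _curr = r->next;   // advance before handing the region out
    return r;
  }
};

int main() {
  Region b = { NULL, 2 };
  Region a = { &b, 1 };              // a -> b, singly linked like the set
  for (ListIter it(&a); it.more_available(); ) {
    std::printf("region %d\n", it.get_next()->id);
  }
  return 0;
}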
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
+
+#include "gc_implementation/g1/heapRegionSet.hpp"
+
+//////////////////// HeapRegionSetBase ////////////////////
+
+inline void HeapRegionSetBase::update_for_addition(HeapRegion* hr) {
+  // Assumes the caller has already verified the region.
+
+  _length           += 1;
+  if (!hr->isHumongous()) {
+    _region_num     += 1;
+  } else {
+    _region_num     += calculate_region_num(hr);
+  }
+  _total_used_bytes += hr->used();
+}
+
+inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
+  hrl_assert_region_ok(this, hr, NULL);
+  assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked"));
+
+  update_for_addition(hr);
+  hr->set_containing_set(this);
+}
+
+inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
+  // Assumes the caller has already verified the region.
+  assert(_length > 0, hrl_ext_msg(this, "pre-condition"));
+  _length -= 1;
+
+  size_t region_num_diff;
+  if (!hr->isHumongous()) {
+    region_num_diff = 1;
+  } else {
+    region_num_diff = calculate_region_num(hr);
+  }
+  assert(region_num_diff <= _region_num,
+         hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" "
+                     "should be <= region num: "SIZE_FORMAT,
+                     name(), region_num_diff, _region_num));
+  _region_num -= region_num_diff;
+
+  size_t used_bytes = hr->used();
+  assert(used_bytes <= _total_used_bytes,
+         hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
+                     "should be <= used bytes: "SIZE_FORMAT,
+                     name(), used_bytes, _total_used_bytes));
+  _total_used_bytes -= used_bytes;
+}
+
+inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
+  hrl_assert_region_ok(this, hr, this);
+  assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked"));
+
+  hr->set_containing_set(NULL);
+  update_for_removal(hr);
+}
+
+//////////////////// HeapRegionSet ////////////////////
+
+inline void HeapRegionSet::add(HeapRegion* hr) {
+  hrl_assert_mt_safety_ok(this);
+  // add_internal() will verify the region.
+  add_internal(hr);
+}
+
+inline void HeapRegionSet::remove(HeapRegion* hr) {
+  hrl_assert_mt_safety_ok(this);
+  // remove_internal() will verify the region.
+  remove_internal(hr);
+}
+
+inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
+                                             HeapRegionSet* proxy_set) {
+  // No need to do the MT safety check here given that this method
+  // does not update the contents of the set but instead accumulates
+  // the changes in proxy_set which is assumed to be thread-local.
+  hrl_assert_sets_match(this, proxy_set);
+  hrl_assert_region_ok(this, hr, this);
+
+  hr->set_containing_set(NULL);
+  proxy_set->update_for_addition(hr);
+}
+
+//////////////////// HeapRegionLinkedList ////////////////////
+
+inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
+  hrl_assert_mt_safety_ok(this);
+  assert((length() == 0 && _head == NULL && _tail == NULL) ||
+         (length() >  0 && _head != NULL && _tail != NULL),
+         hrl_ext_msg(this, "invariant"));
+  // add_internal() will verify the region.
+  add_internal(hr);
+
+  // Now link the region.
+  if (_tail != NULL) {
+    _tail->set_next(hr);
+  } else {
+    _head = hr;
+  }
+  _tail = hr;
+}
+
+inline HeapRegion* HeapRegionLinkedList::remove_head() {
+  hrl_assert_mt_safety_ok(this);
+  assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty"));
+  assert(length() > 0 && _head != NULL && _tail != NULL,
+         hrl_ext_msg(this, "invariant"));
+
+  // We need to unlink it first.
+  HeapRegion* hr = _head;
+  _head = hr->next();
+  if (_head == NULL) {
+    _tail = NULL;
+  }
+  hr->set_next(NULL);
+
+  // remove_internal() will verify the region.
+  remove_internal(hr);
+  return hr;
+}
+
+inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
+  hrl_assert_mt_safety_ok(this);
+
+  if (!is_empty()) {
+    return remove_head();
+  } else {
+    return NULL;
+  }
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
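
The remove_with_proxy() comment above describes a thread-local accumulation
pattern; here is a minimal standalone sketch of that idea (SetStats and its
counters are illustrative, not HotSpot code): workers batch their removals
locally and the owner folds them into the real set once.

#include <cstddef>
#include <cstdio>

struct SetStats {
  size_t length;
  size_t used_bytes;
  SetStats() : length(0), used_bytes(0) {}

  // Accumulate a removal in the (thread-local) proxy; no lock is needed
  // because only the proxy's counters are touched here.
  void remove_with_proxy(size_t region_bytes, SetStats* proxy) {
    proxy->length     += 1;
    proxy->used_bytes += region_bytes;
  }

  // Fold the accumulated removals into the real set, once, under
  // whatever lock protects it.
  void update_from_proxy(const SetStats* proxy) {
    length     -= proxy->length;
    used_bytes -= proxy->used_bytes;
  }
};

int main() {
  SetStats master;
  master.length = 10; master.used_bytes = 10 * 1024;
  SetStats proxy;                          // thread-local in the real scheme
  master.remove_with_proxy(1024, &proxy);  // two removals, no synchronization
  master.remove_with_proxy(1024, &proxy);
  master.update_from_proxy(&proxy);        // one synchronized fold-in
  std::printf("length=%u used=%u\n", (unsigned) master.length,
              (unsigned) master.used_bytes);
  return 0;
}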
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
+
+//////////////////// FreeRegionList ////////////////////
+
+const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
+  if (hr->is_young()) {
+    return "the region should not be young";
+  }
+  // The superclass will check that the region is empty and
+  // not-humongous.
+  return HeapRegionLinkedList::verify_region_extra(hr);
+}
+
+//////////////////// MasterFreeRegionList ////////////////////
+
+bool MasterFreeRegionList::check_mt_safety() {
+  // Master Free List MT safety protocol:
+  // (a) If we're at a safepoint, operations on the master free list
+  // should be invoked by either the VM thread (which will serialize
+  // them) or by the GC workers while holding the
+  // FreeList_lock.
+  // (b) If we're not at a safepoint, operations on the master free
+  // list should be invoked while holding the Heap_lock.
+
+  guarantee((SafepointSynchronize::is_at_safepoint() &&
+               (Thread::current()->is_VM_thread() ||
+                                            FreeList_lock->owned_by_self())) ||
+            (!SafepointSynchronize::is_at_safepoint() &&
+                                                Heap_lock->owned_by_self()),
+            hrl_ext_msg(this, "master free list MT safety protocol"));
+
+  return FreeRegionList::check_mt_safety();
+}
+
+//////////////////// SecondaryFreeRegionList ////////////////////
+
+bool SecondaryFreeRegionList::check_mt_safety() {
+  // Secondary Free List MT safety protocol:
+  // Operations on the secondary free list should always be invoked
+  // while holding the SecondaryFreeList_lock.
+
+  guarantee(SecondaryFreeList_lock->owned_by_self(),
+            hrl_ext_msg(this, "secondary free list MT safety protocol"));
+
+  return FreeRegionList::check_mt_safety();
+}
+
+//////////////////// HumongousRegionSet ////////////////////
+
+const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
+  if (hr->is_young()) {
+    return "the region should not be young";
+  }
+  // The superclass will check that the region is not empty and
+  // humongous.
+  return HeapRegionSet::verify_region_extra(hr);
+}
+
+//////////////////// MasterHumongousRegionSet ////////////////////
+
+bool MasterHumongousRegionSet::check_mt_safety() {
+  // Master Humongous Set MT safety protocol:
+  // (a) If we're at a safepoint, operations on the master humongous
+  // set should be invoked by either the VM thread (which will
+  // serialize them) or by the GC workers while holding the
+  // OldSets_lock.
+  // (b) If we're not at a safepoint, operations on the master
+  // humongous set should be invoked while holding the Heap_lock.
+
+  guarantee((SafepointSynchronize::is_at_safepoint() &&
+               (Thread::current()->is_VM_thread() ||
+                                             OldSets_lock->owned_by_self())) ||
+            (!SafepointSynchronize::is_at_safepoint() &&
+                                                 Heap_lock->owned_by_self()),
+            hrl_ext_msg(this, "master humongous set MT safety protocol"));
+  return HumongousRegionSet::check_mt_safety();
+}
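
The two MT safety protocols above reduce to a small predicate; here is a
minimal standalone sketch of the master free list case (VMState and all names
are illustrative stand-ins, not HotSpot code).

#include <cassert>

struct VMState {
  bool at_safepoint;
  bool is_vm_thread;
  bool holds_freelist_lock;
  bool holds_heap_lock;
};

// At a safepoint: the VM thread or a worker holding the free-list lock.
// Outside a safepoint: any thread holding the heap lock.
bool master_list_mt_safety_ok(const VMState& s) {
  return ( s.at_safepoint && (s.is_vm_thread || s.holds_freelist_lock)) ||
         (!s.at_safepoint && s.holds_heap_lock);
}

int main() {
  VMState vm_thread_at_sp   = { true,  true,  false, false };
  VMState mutator_heap_lock = { false, false, false, true  };
  VMState bad_caller        = { false, false, true,  false };
  assert( master_list_mt_safety_ok(vm_thread_at_sp));
  assert( master_list_mt_safety_ok(mutator_heap_lock));
  assert(!master_list_mt_safety_ok(bad_caller));
  return 0;
}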
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
+
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+
+//////////////////// FreeRegionList ////////////////////
+
+class FreeRegionList : public HeapRegionLinkedList {
+protected:
+  virtual const char* verify_region_extra(HeapRegion* hr);
+
+  virtual bool regions_humongous() { return false; }
+  virtual bool regions_empty()     { return true;  }
+
+public:
+  FreeRegionList(const char* name) : HeapRegionLinkedList(name) { }
+};
+
+//////////////////// MasterFreeRegionList ////////////////////
+
+class MasterFreeRegionList : public FreeRegionList {
+protected:
+  virtual bool check_mt_safety();
+
+public:
+  MasterFreeRegionList(const char* name) : FreeRegionList(name) { }
+};
+
+//////////////////// SecondaryFreeRegionList ////////////////////
+
+class SecondaryFreeRegionList : public FreeRegionList {
+protected:
+  virtual bool check_mt_safety();
+
+public:
+  SecondaryFreeRegionList(const char* name) : FreeRegionList(name) { }
+};
+
+//////////////////// HumongousRegionSet ////////////////////
+
+class HumongousRegionSet : public HeapRegionSet {
+protected:
+  virtual const char* verify_region_extra(HeapRegion* hr);
+
+  virtual bool regions_humongous() { return true;  }
+  virtual bool regions_empty()     { return false; }
+
+public:
+  HumongousRegionSet(const char* name) : HeapRegionSet(name) { }
+};
+
+//////////////////// MasterHumongousRegionSet ////////////////////
+
+class MasterHumongousRegionSet : public HumongousRegionSet {
+protected:
+  virtual bool check_mt_safety();
+
+public:
+  MasterHumongousRegionSet(const char* name) : HumongousRegionSet(name) { }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
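
A minimal standalone sketch of how the regions_humongous()/regions_empty()
overrides above specialize the base class's verification (all names here are
illustrative, not HotSpot code).

#include <cstdio>

struct SetBase {
  virtual ~SetBase() {}
  // Verification hooks, as in HeapRegionSetBase.
  virtual bool regions_humongous() const = 0;
  virtual bool regions_empty() const     = 0;
  const char* describe() const {
    return regions_humongous() ? "humongous" :
           regions_empty()     ? "free" : "other";
  }
};

struct FreeListSketch : SetBase {
  virtual bool regions_humongous() const { return false; }
  virtual bool regions_empty() const     { return true;  }
};

struct HumongousSetSketch : SetBase {
  virtual bool regions_humongous() const { return true;  }
  virtual bool regions_empty() const     { return false; }
};

int main() {
  FreeListSketch f;
  HumongousSetSketch h;
  std::printf("%s %s\n", f.describe(), h.describe());  // prints: free humongous
  return 0;
}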
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,8 +38,8 @@
 # include "thread_windows.inline.hpp"
 #endif
 
-PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm, bool active) :
-  _qset(qset_), _buf(NULL), _index(0), _active(active),
+PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
+  _qset(qset), _buf(NULL), _index(0), _active(active),
   _perm(perm), _lock(NULL)
 {}
 
@@ -153,10 +153,16 @@
 }
 
 void PtrQueue::handle_zero_index() {
-  assert(0 == _index, "Precondition.");
+  assert(_index == 0, "Precondition.");
+
   // This thread records the full buffer and allocates a new one (while
   // holding the lock if there is one).
   if (_buf != NULL) {
+    if (!should_enqueue_buffer()) {
+      assert(_index > 0, "the buffer can only be re-used if it's not full");
+      return;
+    }
+
     if (_lock) {
       assert(_lock->owned_by_self(), "Required.");
 
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,7 @@
 public:
   // Initialize this queue to contain a null buffer, and be part of the
   // given PtrQueueSet.
-  PtrQueue(PtrQueueSet*, bool perm = false, bool active = false);
+  PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
   // Release any contained resources.
   void flush();
   // Calls flush() when destroyed.
@@ -85,6 +85,14 @@
     else enqueue_known_active(ptr);
   }
 
+  // This method is called when we're doing the zero index handling
+  // and gives the queue a chance to do any pre-enqueueing
+  // processing on the buffer. It should return
+  // true if the buffer should be enqueued, or false if enough
+  // entries were cleared from it so that it can be re-used. It should
+  // not return false if the buffer is still full (otherwise we can
+  // get into an infinite loop).
+  virtual bool should_enqueue_buffer() { return true; }
   void handle_zero_index();
   void locking_enqueue_completed_buffer(void** buf);
 
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,98 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/sharedHeap.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.hpp"
 
+// This method removes entries from an SATB buffer that will not be
+// useful to the concurrent marking threads. An entry is removed if it
+// satisfies one of the following conditions:
+//
+// * it points to an object outside the G1 heap (G1's concurrent
+//     marking only visits objects inside the G1 heap),
+// * it points to an object that has been allocated since marking
+//     started (according to SATB those objects do not need to be
+//     visited during marking), or
+// * it points to an object that has already been marked (no need to
+//     process it again).
+//
+// The rest of the entries will be retained and are compacted towards
+// the top of the buffer. If this filtering clears a large enough
+// chunk of the buffer, we can re-use it (instead of enqueueing it)
+// and just allow the mutator to carry on executing.
+
+bool ObjPtrQueue::should_enqueue_buffer() {
+  assert(_lock == NULL || _lock->owned_by_self(),
+         "we should have taken the lock before calling this");
+
+  // A value of 0 means "don't filter SATB buffers".
+  if (G1SATBBufferEnqueueingThresholdPercent == 0) {
+    return true;
+  }
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  // This method should only be called if there is a non-NULL buffer
+  // that is full.
+  assert(_index == 0, "pre-condition");
+  assert(_buf != NULL, "pre-condition");
+
+  void** buf = _buf;
+  size_t sz = _sz;
+
+  // Used for sanity checking at the end of the loop.
+  debug_only(size_t entries = 0; size_t retained = 0;)
+
+  size_t i = sz;
+  size_t new_index = sz;
+
+  // Given that we are expecting _index == 0, we could have changed
+  // the loop condition to (i > 0). But we are using _index for
+  // generality.
+  while (i > _index) {
+    assert(i > 0, "we should have at least one more entry to process");
+    i -= oopSize;
+    debug_only(entries += 1;)
+    oop* p = (oop*) &buf[byte_index_to_index((int) i)];
+    oop obj = *p;
+    // NULL the entry so that unused parts of the buffer contain NULLs
+    // at the end. If we are going to retain it we will copy it to its
+    // final place. If we have retained all entries we have visited so
+    // far, we'll just end up copying it to the same place.
+    *p = NULL;
+
+    bool retain = g1h->is_obj_ill(obj);
+    if (retain) {
+      assert(new_index > 0, "we should not have already filled up the buffer");
+      new_index -= oopSize;
+      assert(new_index >= i,
+             "new_index should never be below i, as we alwaysr compact 'up'");
+      oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
+      assert(new_p >= p, "the destination location should never be below "
+             "the source as we always compact 'up'");
+      assert(*new_p == NULL,
+             "we should have already cleared the destination location");
+      *new_p = obj;
+      debug_only(retained += 1;)
+    }
+  }
+  size_t entries_calc = (sz - _index) / oopSize;
+  assert(entries == entries_calc, "the number of entries we counted "
+         "should match the number of entries we calculated");
+  size_t retained_calc = (sz - new_index) / oopSize;
+  assert(retained == retained_calc, "the number of retained entries we counted "
+         "should match the number of retained entries we calculated");
+  size_t perc = retained_calc * 100 / entries_calc;
+  bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
+  _index = new_index;
+
+  return should_enqueue;
+}
+
 void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
   if (_buf != NULL) {
     apply_closure_to_buffer(cl, _buf, _index, _sz);
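
The enqueueing decision above reduces to simple percentage arithmetic; here is
a minimal standalone sketch (the 60% threshold is chosen for illustration
only; not HotSpot code).

#include <cassert>
#include <cstddef>

bool should_enqueue(size_t entries, size_t retained, size_t threshold_percent) {
  if (threshold_percent == 0) return true;  // 0 disables filtering entirely
  size_t perc = retained * 100 / entries;
  return perc > threshold_percent;
}

int main() {
  assert(!should_enqueue(10, 3, 60));  // mostly filtered -> re-use the buffer
  assert( should_enqueue(10, 7, 60));  // mostly retained -> enqueue it
  return 0;
}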
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,13 +33,18 @@
 // A ptrQueue whose elements are "oops", pointers to object heads.
 class ObjPtrQueue: public PtrQueue {
 public:
-  ObjPtrQueue(PtrQueueSet* qset_, bool perm = false) :
+  ObjPtrQueue(PtrQueueSet* qset, bool perm = false) :
     // SATB queues are only active during marking cycles. We create
     // them with their active field set to false. If a thread is
     // created during a cycle and its SATB queue needs to be activated
     // before the thread starts running, we'll need to set its active
     // field to true. This is done in JavaThread::initialize_queues().
-    PtrQueue(qset_, perm, false /* active */) { }
+    PtrQueue(qset, perm, false /* active */) { }
+
+  // Overrides PtrQueue::should_enqueue_buffer(). See the method's
+  // definition for more information.
+  virtual bool should_enqueue_buffer();
+
   // Apply the closure to all elements, and reset the index to make the
   // buffer empty.
   void apply_closure(ObjectClosure* cl);
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -38,7 +38,6 @@
 }
 
 void VM_G1CollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
@@ -46,7 +45,6 @@
 }
 
 void VM_G1CollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   g1h->do_full_collection(false /* clear_all_soft_refs */);
@@ -72,7 +70,6 @@
 }
 
 void VM_G1IncCollectionPause::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1058,10 +1058,11 @@
 #endif
 
 void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
-  if ((m != markOopDesc::prototype()) &&
-      (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) {
+  if (m->must_be_preserved_for_promotion_failure(obj)) {
+    // We should really have separate per-worker stacks, rather
+    // than locking a common pair of stacks.
     MutexLocker ml(ParGCRareEvent_lock);
-    DefNewGeneration::preserve_mark_if_necessary(obj, m);
+    preserve_mark(obj, m);
   }
 }
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -258,6 +258,7 @@
     BiasedLocking::restore_marks();
     Threads::gc_epilogue();
     CodeCache::gc_epilogue();
+    JvmtiExport::gc_epilogue();
 
     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1054,6 +1054,7 @@
 
   Threads::gc_epilogue();
   CodeCache::gc_epilogue();
+  JvmtiExport::gc_epilogue();
 
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -694,6 +694,8 @@
 void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
   _promotion_failed = true;
   if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
+    // Should use per-worker private stacks here rather than
+    // locking a common pair of stacks.
     ThreadCritical tc;
     _preserved_oop_stack.push(obj);
     _preserved_mark_stack.push(obj_mark);
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -42,8 +42,7 @@
 }
 
 void VM_ParallelGCFailedAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -54,8 +53,6 @@
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-
-  notify_gc_end();
 }
 
 VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(size_t size,
@@ -67,8 +64,7 @@
 }
 
 void VM_ParallelGCFailedPermanentAllocation::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -78,7 +74,6 @@
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 
 // Only used for System.gc() calls
@@ -91,8 +86,7 @@
 }
 
 void VM_ParallelGCSystemGC::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
@@ -106,5 +100,4 @@
   } else {
     heap->invoke_full_gc(false);
   }
-  notify_gc_end();
 }
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -31,7 +31,6 @@
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
-#include "prims/jvmtiExport.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -40,6 +39,7 @@
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #endif
+
 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
 HS_DTRACE_PROBE_DECL(hotspot, gc__end);
 
@@ -158,8 +158,7 @@
 
 
 void VM_GenCollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
@@ -169,22 +168,19 @@
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
 
 void VM_GenCollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
-  notify_gc_end();
 }
 
 void VM_GenCollectForPermanentAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
+
   SharedHeap* heap = (SharedHeap*)Universe::heap();
   GCCauseSetter gccs(heap, _gc_cause);
   switch (heap->kind()) {
@@ -209,5 +205,4 @@
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -30,6 +30,7 @@
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/vm_operations.hpp"
+#include "prims/jvmtiExport.hpp"
 
 // The following class hierarchy represents
 // a set of operations (VM_Operation) related to GC.
@@ -209,4 +210,19 @@
   HeapWord* result() const       { return _res; }
 };
 
+class SvcGCMarker : public StackObj {
+ private:
+  JvmtiGCMarker _jgcm;
+ public:
+  typedef enum { MINOR, FULL, OTHER } reason_type;
+
+  SvcGCMarker(reason_type reason) {
+    VM_GC_Operation::notify_gc_begin(reason == FULL);
+  }
+
+  ~SvcGCMarker() {
+    VM_GC_Operation::notify_gc_end();
+  }
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_VMGCOPERATIONS_HPP
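
A minimal standalone sketch of the RAII shape of SvcGCMarker
(SvcGCMarkerSketch is illustrative, not HotSpot code): the constructor fires
the begin notification and the destructor fires the end notification on every
exit path, which is why the paired notify_gc_begin()/notify_gc_end() calls
could be deleted above.

#include <cstdio>

struct SvcGCMarkerSketch {
  explicit SvcGCMarkerSketch(bool full) {
    std::printf("notify_gc_begin(full=%d)\n", (int) full);
  }
  ~SvcGCMarkerSketch() {
    // Runs on every exit path, so begin/end notifications always pair up.
    std::printf("notify_gc_end()\n");
  }
};

void do_collection(bool bail_out_early) {
  SvcGCMarkerSketch sgcm(/* full = */ true);
  if (bail_out_early) return;   // early return still fires the destructor
  // ... collection work ...
}

int main() {
  do_collection(true);
  do_collection(false);
  return 0;
}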
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -153,6 +153,7 @@
       check_for_non_bad_heap_word_value(result, size));
     assert(!HAS_PENDING_EXCEPTION,
            "Unexpected exception, will result in uninitialized storage");
+    THREAD->incr_allocated_bytes(size * HeapWordSize);
     return result;
   }
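
A minimal standalone sketch of the per-thread allocation accounting added
above (HeapWordSizeSketch = 8 bytes is an assumption for an LP64 build; not
HotSpot code).

#include <cassert>
#include <cstddef>

static const size_t HeapWordSizeSketch = 8;  // bytes per heap word (assumed)

struct ThreadSketch {
  size_t _allocated_bytes;
  ThreadSketch() : _allocated_bytes(0) {}
  void incr_allocated_bytes(size_t bytes) { _allocated_bytes += bytes; }
};

int main() {
  ThreadSketch t;
  size_t size_in_words = 16;                 // e.g. a 128-byte object
  t.incr_allocated_bytes(size_in_words * HeapWordSizeSketch);
  assert(t._allocated_bytes == 128);
  return 0;
}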
 
--- a/src/share/vm/interpreter/bytecode.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/bytecode.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,30 +34,6 @@
 
 // Implementation of Bytecode
 
-bool Bytecode::check_must_rewrite(Bytecodes::Code code) const {
-  assert(Bytecodes::can_rewrite(code), "post-check only");
-
-  // Some codes are conditionally rewriting.  Look closely at them.
-  switch (code) {
-  case Bytecodes::_aload_0:
-    // Even if RewriteFrequentPairs is turned on,
-    // the _aload_0 code might delay its rewrite until
-    // a following _getfield rewrites itself.
-    return false;
-
-  case Bytecodes::_lookupswitch:
-    return false;  // the rewrite is not done by the interpreter
-
-  case Bytecodes::_new:
-    // (Could actually look at the class here, but the profit would be small.)
-    return false;  // the rewrite is not always done
-  }
-
-  // No other special cases.
-  return true;
-}
-
-
 #ifdef ASSERT
 
 void Bytecode::assert_same_format_as(Bytecodes::Code testbc, bool is_wide) const {
@@ -188,17 +164,16 @@
   // Note:  Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
   // at the same time it allocates per-call-site CP cache entries.
   Bytecodes::Code rawc = code();
-  Bytecode* invoke = bytecode();
-  if (invoke->has_index_u4(rawc))
-    return invoke->get_index_u4(rawc);
+  if (has_index_u4(rawc))
+    return get_index_u4(rawc);
   else
-    return invoke->get_index_u2_cpcache(rawc);
+    return get_index_u2_cpcache(rawc);
 }
 
 int Bytecode_member_ref::pool_index() const {
   int index = this->index();
   DEBUG_ONLY({
-      if (!bytecode()->has_index_u4(code()))
+      if (!has_index_u4(code()))
         index -= constantPoolOopDesc::CPCACHE_INDEX_TAG;
     });
   return _method->constants()->cache()->entry_at(index)->constant_pool_index();
@@ -214,13 +189,12 @@
 // Implementation of Bytecode_loadconstant
 
 int Bytecode_loadconstant::raw_index() const {
-  Bytecode* bcp = bytecode();
-  Bytecodes::Code rawc = bcp->code();
+  Bytecodes::Code rawc = code();
   assert(rawc != Bytecodes::_wide, "verifier prevents this");
   if (Bytecodes::java_code(rawc) == Bytecodes::_ldc)
-    return bcp->get_index_u1(rawc);
+    return get_index_u1(rawc);
   else
-    return bcp->get_index_u2(rawc, false);
+    return get_index_u2(rawc, false);
 }
 
 int Bytecode_loadconstant::pool_index() const {
@@ -258,7 +232,7 @@
     case Bytecodes::_lookupswitch:
       { int i = number_of_pairs() - 1;
         while (i-- > 0) {
-          assert(pair_at(i)->match() < pair_at(i+1)->match(), "unsorted table entries");
+          assert(pair_at(i).match() < pair_at(i+1).match(), "unsorted table entries");
         }
       }
       break;
--- a/src/share/vm/interpreter/bytecode.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/bytecode.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,14 +38,20 @@
 # include "bytes_zero.hpp"
 #endif
 
-// Base class for different kinds of abstractions working
-// relative to an objects 'this' pointer.
+class ciBytecodeStream;
+
+// The base class for different kinds of bytecode abstractions.
+// Provides the primitive operations to manipulate code relative
+// to the bcp.
 
-class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
- public:
+class Bytecode: public StackObj {
+ protected:
+  const address   _bcp;
+  const Bytecodes::Code _code;
+
   // Address computation
-  address addr_at            (int offset)        const     { return (address)this + offset; }
-  int     byte_at            (int offset)        const     { return *(addr_at(offset)); }
+  address addr_at            (int offset)        const     { return (address)_bcp + offset; }
+  u_char byte_at(int offset) const               { return *addr_at(offset); }
   address aligned_addr_at    (int offset)        const     { return (address)round_to((intptr_t)addr_at(offset), jintSize); }
   int     aligned_offset     (int offset)        const     { return aligned_addr_at(offset) - addr_at(0); }
 
@@ -54,31 +60,20 @@
   int     get_Java_u4_at     (int offset)        const     { return Bytes::get_Java_u4(addr_at(offset)); }
   int     get_native_u2_at   (int offset)        const     { return Bytes::get_native_u2(addr_at(offset)); }
   int     get_native_u4_at   (int offset)        const     { return Bytes::get_native_u4(addr_at(offset)); }
-};
-
-
-// The base class for different kinds of bytecode abstractions.
-// Provides the primitive operations to manipulate code relative
-// to an objects 'this' pointer.
-// FIXME: Make this a ResourceObj, include the enclosing methodOop, and cache the opcode.
-
-class Bytecode: public ThisRelativeObj {
- protected:
-  u_char byte_at(int offset) const               { return *addr_at(offset); }
-  bool check_must_rewrite(Bytecodes::Code bc) const;
 
  public:
-  // Attributes
-  address bcp() const                            { return addr_at(0); }
-  int instruction_size() const                   { return Bytecodes::length_at(bcp()); }
+  Bytecode(methodOop method, address bcp): _bcp(bcp), _code(Bytecodes::code_at(method, addr_at(0))) {
+    assert(method != NULL, "this form requires a valid methodOop");
+  }
+  // Defined in ciStreams.hpp
+  inline Bytecode(const ciBytecodeStream* stream, address bcp = NULL);
 
-  // Warning: Use code() with caution on live bytecode streams.  4926272
-  Bytecodes::Code code() const                   { return Bytecodes::code_at(addr_at(0)); }
+  // Attributes
+  address bcp() const                            { return _bcp; }
+  int instruction_size() const                   { return Bytecodes::length_for_code_at(_code, bcp()); }
+
+  Bytecodes::Code code() const                   { return _code; }
   Bytecodes::Code java_code() const              { return Bytecodes::java_code(code()); }
-  bool must_rewrite(Bytecodes::Code code) const  { return Bytecodes::can_rewrite(code) && check_must_rewrite(code); }
-
-  // Creation
-  inline friend Bytecode* Bytecode_at(address bcp);
 
   // Static functions for parsing bytecodes in place.
   int get_index_u1(Bytecodes::Code bc) const {
@@ -89,7 +84,7 @@
     assert_same_format_as(bc, is_wide); assert_index_size(2, bc, is_wide);
     address p = addr_at(is_wide ? 2 : 1);
     if (can_use_native_byte_order(bc, is_wide))
-          return Bytes::get_native_u2(p);
+      return Bytes::get_native_u2(p);
     else  return Bytes::get_Java_u2(p);
   }
   int get_index_u1_cpcache(Bytecodes::Code bc) const {
@@ -138,20 +133,17 @@
   }
 };
 
-inline Bytecode* Bytecode_at(address bcp) {
-  // Warning: Use with caution on live bytecode streams.  4926272
-  return (Bytecode*)bcp;
-}
-
 
 // Abstractions for lookupswitch bytecode
-
-class LookupswitchPair: ThisRelativeObj {
+class LookupswitchPair VALUE_OBJ_CLASS_SPEC {
  private:
-  int  _match;
-  int  _offset;
+  const address _bcp;
+
+  address addr_at            (int offset)        const     { return _bcp + offset; }
+  int     get_Java_u4_at     (int offset)        const     { return Bytes::get_Java_u4(addr_at(offset)); }
 
  public:
+  LookupswitchPair(address bcp): _bcp(bcp) {}
   int  match() const                             { return get_Java_u4_at(0 * jintSize); }
   int  offset() const                            { return get_Java_u4_at(1 * jintSize); }
 };
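
A minimal standalone sketch of how a LookupswitchPair-style view decodes its
big-endian (match, offset) pair from a raw address, mirroring
get_Java_u4_at(0 * jintSize) and get_Java_u4_at(1 * jintSize) above (all names
are illustrative, not HotSpot code).

#include <cassert>

static const int jintSizeSketch = 4;

// Java bytecode stores 32-bit operands in big-endian (network) order.
static int get_java_u4_at(const unsigned char* p) {
  return (int) (((unsigned) p[0] << 24) | ((unsigned) p[1] << 16) |
                ((unsigned) p[2] << 8)  |  (unsigned) p[3]);
}

struct PairViewSketch {
  const unsigned char* _bcp;
  explicit PairViewSketch(const unsigned char* bcp) : _bcp(bcp) {}
  int match()  const { return get_java_u4_at(_bcp + 0 * jintSizeSketch); }
  int offset() const { return get_java_u4_at(_bcp + 1 * jintSizeSketch); }
};

int main() {
  unsigned char raw[8] = { 0, 0, 0, 42, 0, 0, 0, 12 };  // match=42, offset=12
  PairViewSketch pv(raw);
  assert(pv.match() == 42 && pv.offset() == 12);
  return 0;
}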
@@ -159,26 +151,25 @@
 
 class Bytecode_lookupswitch: public Bytecode {
  public:
+  Bytecode_lookupswitch(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
+  // Defined in ciStreams.hpp
+  inline Bytecode_lookupswitch(const ciBytecodeStream* stream);
   void verify() const PRODUCT_RETURN;
 
   // Attributes
   int  default_offset() const                    { return get_Java_u4_at(aligned_offset(1 + 0*jintSize)); }
   int  number_of_pairs() const                   { return get_Java_u4_at(aligned_offset(1 + 1*jintSize)); }
-  LookupswitchPair* pair_at(int i) const         { assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
-                                                   return (LookupswitchPair*)aligned_addr_at(1 + (1 + i)*2*jintSize); }
-  // Creation
-  inline friend Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp);
+  LookupswitchPair pair_at(int i) const          {
+    assert(0 <= i && i < number_of_pairs(), "pair index out of bounds");
+    return LookupswitchPair(aligned_addr_at(1 + (1 + i)*2*jintSize));
+  }
 };
 
-inline Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp) {
-  Bytecode_lookupswitch* b = (Bytecode_lookupswitch*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
-
 class Bytecode_tableswitch: public Bytecode {
  public:
+  Bytecode_tableswitch(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
+  // Defined in ciStreams.hpp
+  inline Bytecode_tableswitch(const ciBytecodeStream* stream);
   void verify() const PRODUCT_RETURN;
 
   // Attributes
@@ -187,52 +178,36 @@
   int  high_key() const                          { return get_Java_u4_at(aligned_offset(1 + 2*jintSize)); }
   int  dest_offset_at(int i) const;
   int  length()                                  { return high_key()-low_key()+1; }
-
-  // Creation
-  inline friend Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp);
 };
 
-inline Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp) {
-  Bytecode_tableswitch* b = (Bytecode_tableswitch*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
-
 // Common code for decoding invokes and field references.
 
-class Bytecode_member_ref: public ResourceObj {
+class Bytecode_member_ref: public Bytecode {
  protected:
-  methodHandle _method;                          // method containing the bytecode
-  int          _bci;                             // position of the bytecode
+  const methodHandle _method;                          // method containing the bytecode
 
-  Bytecode_member_ref(methodHandle method, int bci)  : _method(method), _bci(bci) {}
+  Bytecode_member_ref(methodHandle method, int bci)  : Bytecode(method(), method()->bcp_from(bci)), _method(method) {}
+
+  methodHandle method() const                    { return _method; }
 
  public:
-  // Attributes
-  methodHandle method() const                    { return _method; }
-  int          bci() const                       { return _bci; }
-  address      bcp() const                       { return _method->bcp_from(bci()); }
-  Bytecode*    bytecode() const                  { return Bytecode_at(bcp()); }
-
   int          index() const;                    // cache index (loaded from instruction)
   int          pool_index() const;               // constant pool index
   symbolOop    name() const;                     // returns the name of the method or field
   symbolOop    signature() const;                // returns the signature of the method or field
 
   BasicType    result_type(Thread* thread) const; // returns the result type of the getfield or invoke
-
-  Bytecodes::Code code() const                   { return Bytecodes::code_at(bcp(), _method()); }
-  Bytecodes::Code java_code() const              { return Bytecodes::java_code(code()); }
 };
 
 // Abstraction for invoke_{virtual, static, interface, special}
 
 class Bytecode_invoke: public Bytecode_member_ref {
  protected:
-  Bytecode_invoke(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) {}
+  // Constructor that skips verification
+  Bytecode_invoke(methodHandle method, int bci, bool unused)  : Bytecode_member_ref(method, bci) {}
 
  public:
+  Bytecode_invoke(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) { verify(); }
   void verify() const;
 
   // Attributes
@@ -253,31 +228,20 @@
                                                           is_invokespecial()   ||
                                                           is_invokedynamic(); }
 
-  // Creation
-  inline friend Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci);
-
-  // Like Bytecode_invoke_at. Instead it returns NULL if the bci is not at an invoke.
-  inline friend Bytecode_invoke* Bytecode_invoke_at_check(methodHandle method, int bci);
+  // Helper to skip verification. Use is_valid() to check if the result is really an invoke.
+  inline friend Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci);
 };
 
-inline Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci) {
-  Bytecode_invoke* b = new Bytecode_invoke(method, bci);
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
-inline Bytecode_invoke* Bytecode_invoke_at_check(methodHandle method, int bci) {
-  Bytecode_invoke* b = new Bytecode_invoke(method, bci);
-  return b->is_valid() ? b : NULL;
+inline Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci) {
+  return Bytecode_invoke(method, bci, false);
 }
 
 
 // Abstraction for all field accesses (put/get field/static)
 class Bytecode_field: public Bytecode_member_ref {
- protected:
-  Bytecode_field(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) {}
+ public:
+  Bytecode_field(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) { verify(); }
 
- public:
   // Testers
   bool is_getfield() const                       { return java_code() == Bytecodes::_getfield; }
   bool is_putfield() const                       { return java_code() == Bytecodes::_putfield; }
@@ -292,131 +256,64 @@
                                                           is_getstatic()  ||
                                                           is_putstatic(); }
   void verify() const;
-
-  // Creation
-  inline friend Bytecode_field* Bytecode_field_at(methodHandle method, int bci);
 };
 
-inline Bytecode_field* Bytecode_field_at(methodHandle method, int bci) {
-  Bytecode_field* b = new Bytecode_field(method, bci);
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
-
 // Abstraction for checkcast
-
 class Bytecode_checkcast: public Bytecode {
  public:
+  Bytecode_checkcast(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
   void verify() const { assert(Bytecodes::java_code(code()) == Bytecodes::_checkcast, "check checkcast"); }
 
   // Returns index
   long index() const   { return get_index_u2(Bytecodes::_checkcast); };
-
-  // Creation
-  inline friend Bytecode_checkcast* Bytecode_checkcast_at(address bcp);
 };
 
-inline Bytecode_checkcast* Bytecode_checkcast_at(address bcp) {
-  Bytecode_checkcast* b = (Bytecode_checkcast*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
-
 // Abstraction for instanceof
-
 class Bytecode_instanceof: public Bytecode {
  public:
+  Bytecode_instanceof(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
   void verify() const { assert(code() == Bytecodes::_instanceof, "check instanceof"); }
 
   // Returns index
   long index() const   { return get_index_u2(Bytecodes::_instanceof); };
-
-  // Creation
-  inline friend Bytecode_instanceof* Bytecode_instanceof_at(address bcp);
 };
 
-inline Bytecode_instanceof* Bytecode_instanceof_at(address bcp) {
-  Bytecode_instanceof* b = (Bytecode_instanceof*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
-
 class Bytecode_new: public Bytecode {
  public:
+  Bytecode_new(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
   void verify() const { assert(java_code() == Bytecodes::_new, "check new"); }
 
   // Returns index
   long index() const   { return get_index_u2(Bytecodes::_new); };
-
-  // Creation
-  inline friend Bytecode_new* Bytecode_new_at(address bcp);
 };
 
-inline Bytecode_new* Bytecode_new_at(address bcp) {
-  Bytecode_new* b = (Bytecode_new*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
-
 class Bytecode_multianewarray: public Bytecode {
  public:
+  Bytecode_multianewarray(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
   void verify() const { assert(java_code() == Bytecodes::_multianewarray, "check multianewarray"); }
 
   // Returns index
   long index() const   { return get_index_u2(Bytecodes::_multianewarray); };
-
-  // Creation
-  inline friend Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp);
 };
 
-inline Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp) {
-  Bytecode_multianewarray* b = (Bytecode_multianewarray*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
-
 class Bytecode_anewarray: public Bytecode {
  public:
+  Bytecode_anewarray(methodOop method, address bcp): Bytecode(method, bcp) { verify(); }
   void verify() const { assert(java_code() == Bytecodes::_anewarray, "check anewarray"); }
 
   // Returns index
   long index() const   { return get_index_u2(Bytecodes::_anewarray); };
-
-  // Creation
-  inline friend Bytecode_anewarray* Bytecode_anewarray_at(address bcp);
 };
 
-inline Bytecode_anewarray* Bytecode_anewarray_at(address bcp) {
-  Bytecode_anewarray* b = (Bytecode_anewarray*)bcp;
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
-
 // Abstraction for ldc, ldc_w and ldc2_w
-
-class Bytecode_loadconstant: public ResourceObj {
+class Bytecode_loadconstant: public Bytecode {
  private:
-  int          _bci;
-  methodHandle _method;
-
-  Bytecodes::Code code() const                   { return bytecode()->code(); }
+  const methodHandle _method;
 
   int raw_index() const;
 
-  Bytecode_loadconstant(methodHandle method, int bci) : _method(method), _bci(bci) {}
-
  public:
-  // Attributes
-  methodHandle method() const                    { return _method; }
-  int          bci() const                       { return _bci; }
-  address      bcp() const                       { return _method->bcp_from(bci()); }
-  Bytecode*    bytecode() const                  { return Bytecode_at(bcp()); }
+  Bytecode_loadconstant(methodHandle method, int bci): Bytecode(method(), method->bcp_from(bci)), _method(method) { verify(); }
 
   void verify() const {
     assert(_method.not_null(), "must supply method");
@@ -437,15 +334,6 @@
   BasicType result_type() const;        // returns the result type of the ldc
 
   oop resolve_constant(TRAPS) const;
-
-  // Creation
-  inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci);
 };
 
-inline Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci) {
-  Bytecode_loadconstant* b = new Bytecode_loadconstant(method, bci);
-  DEBUG_ONLY(b->verify());
-  return b;
-}
-
 #endif // SHARE_VM_INTERPRETER_BYTECODE_HPP
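
The hunks above replace the Bytecode_*_at factory functions, which handed back pointers (some freshly allocated, some just the bcp cast), with stack-constructed value objects: the public constructor runs verify(), while a private constructor taking an unused bool skips it and is reachable only through the Bytecode_invoke_check friend. A minimal standalone sketch of that pattern; every name below is a hypothetical mimic and the validity predicate is arbitrary, not the HotSpot logic:

    #include <cassert>

    class Invoke {
      bool _valid;
      // Private constructor that skips verification; the extra bool only
      // selects this overload, mirroring the patch's unused parameter.
      Invoke(int bci, bool) : _valid(bci % 2 == 0) {}
     public:
      explicit Invoke(int bci) : Invoke(bci, false) { verify(); }
      void verify() const   { assert(_valid && "not an invoke"); }
      bool is_valid() const { return _valid; }
      friend Invoke Invoke_check(int bci);
    };

    // Unverified variant: callers test is_valid() instead of asserting.
    inline Invoke Invoke_check(int bci) { return Invoke(bci, false); }

    int main() {
      Invoke checked(2);                  // verifies at construction
      Invoke maybe = Invoke_check(3);     // caller decides how to handle invalid
      return maybe.is_valid() ? 1 : 0;
    }
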
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -831,11 +831,11 @@
       // much like trying to deopt at a poll return. In that case we simply
       // get out of here
       //
-      if ( Bytecodes::code_at(pc, METHOD) == Bytecodes::_return_register_finalizer) {
+      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
         // this will do the right thing even if an exception is pending.
         goto handle_return;
       }
-      UPDATE_PC(Bytecodes::length_at(pc));
+      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
       if (THREAD->has_pending_exception()) goto handle_exception;
       goto run;
     }
--- a/src/share/vm/interpreter/bytecodeStream.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/bytecodeStream.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,7 @@
     // in raw mode, pretend indy is "bJJ__"
     assert(size == 2, "raw invokedynamic instruction has 2-byte index only");
   } else {
-    bytecode()->assert_index_size(size, raw_code(), is_wide());
+    bytecode().assert_index_size(size, raw_code(), is_wide());
   }
 }
 
--- a/src/share/vm/interpreter/bytecodeStream.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/bytecodeStream.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -105,14 +105,14 @@
   bool            is_last_bytecode() const       { return _next_bci >= _end_bci; }
 
   address         bcp() const                    { return method()->code_base() + _bci; }
-  Bytecode*       bytecode() const               { return Bytecode_at(bcp()); }
+  Bytecode        bytecode() const               { return Bytecode(_method(), bcp()); }
 
   // State changes
   void            set_next_bci(int bci)          { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
 
   // Bytecode-specific attributes
-  int             dest() const                   { return bci() + bytecode()->get_offset_s2(raw_code()); }
-  int             dest_w() const                 { return bci() + bytecode()->get_offset_s4(raw_code()); }
+  int             dest() const                   { return bci() + bytecode().get_offset_s2(raw_code()); }
+  int             dest_w() const                 { return bci() + bytecode().get_offset_s4(raw_code()); }
 
   // One-byte indices.
   int             get_index_u1() const           { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
@@ -189,7 +189,7 @@
     } else {
       // get bytecode
       address bcp = this->bcp();
-      raw_code = Bytecodes::code_at(bcp);
+      raw_code = Bytecodes::code_at(_method(), bcp);
       code = Bytecodes::java_code(raw_code);
       // set next bytecode position
       //
@@ -197,7 +197,7 @@
       // tty bytecode otherwise the stepping is wrong!
       // (careful: length_for(...) must be used first!)
       int l = Bytecodes::length_for(code);
-      if (l == 0) l = Bytecodes::length_at(bcp);
+      if (l == 0) l = Bytecodes::length_at(_method(), bcp);
       _next_bci  += l;
       assert(_bci < _next_bci, "length must be > 0");
       // set attributes
@@ -219,16 +219,16 @@
   Bytecodes::Code code() const                   { return _code; }
 
   // Unsigned indices, widening
-  int             get_index() const              { return is_wide() ? bytecode()->get_index_u2(raw_code(), true) : get_index_u1(); }
+  int             get_index() const              { return is_wide() ? bytecode().get_index_u2(raw_code(), true) : get_index_u1(); }
   // Get an unsigned 2-byte index, swapping the bytes if necessary.
   int             get_index_u2() const           { assert_raw_stream(false);
-                                                   return bytecode()->get_index_u2(raw_code(), false); }
+                                                   return bytecode().get_index_u2(raw_code(), false); }
   // Get an unsigned 2-byte index in native order.
   int             get_index_u2_cpcache() const   { assert_raw_stream(false);
-                                                   return bytecode()->get_index_u2_cpcache(raw_code()); }
+                                                   return bytecode().get_index_u2_cpcache(raw_code()); }
   int             get_index_u4() const           { assert_raw_stream(false);
-                                                   return bytecode()->get_index_u4(raw_code()); }
-  bool            has_index_u4() const           { return bytecode()->has_index_u4(raw_code()); }
+                                                   return bytecode().get_index_u4(raw_code()); }
+  bool            has_index_u4() const           { return bytecode().has_index_u4(raw_code()); }
 };
 
 #endif // SHARE_VM_INTERPRETER_BYTECODESTREAM_HPP
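
Since bytecode() now returns a Bytecode by value, every BytecodeStream call site switches from -> to . and no heap allocation is involved. A self-contained sketch of the dest() shape under this convention, with stand-in types; 0xA7 is the JVM goto opcode and branch offsets are signed 16-bit big-endian:

    #include <cstdint>

    struct Bc {                                      // value wrapper over a bcp
      const uint8_t* bcp;
      int get_offset_s2() const {                    // signed big-endian s2
        return (int16_t)((bcp[1] << 8) | bcp[2]);
      }
    };

    struct Stream {
      const uint8_t* base;
      int bci;
      Bc  bytecode() const { return Bc{base + bci}; }   // by value, no heap
      int dest() const     { return bci + bytecode().get_offset_s2(); }
    };

    int main() {
      const uint8_t code[] = { 0xA7, 0x00, 0x05 };      // goto +5
      Stream s{code, 0};
      return s.dest() == 5 ? 0 : 1;
    }
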
--- a/src/share/vm/interpreter/bytecodeTracer.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,9 +100,9 @@
     Bytecodes::Code code;
     if (is_wide()) {
       // bcp wasn't advanced if previous bytecode was _wide.
-      code = Bytecodes::code_at(bcp+1);
+      code = Bytecodes::code_at(method(), bcp+1);
     } else {
-      code = Bytecodes::code_at(bcp);
+      code = Bytecodes::code_at(method(), bcp);
     }
     _code = code;
      int bci = bcp - method->code_base();
@@ -127,11 +127,11 @@
   void trace(methodHandle method, address bcp, outputStream* st) {
     _current_method = method();
     ResourceMark rm;
-    Bytecodes::Code code = Bytecodes::code_at(bcp);
+    Bytecodes::Code code = Bytecodes::code_at(method(), bcp);
     // Set is_wide
     _is_wide = (code == Bytecodes::_wide);
     if (is_wide()) {
-      code = Bytecodes::code_at(bcp+1);
+      code = Bytecodes::code_at(method(), bcp+1);
     }
     _code = code;
     int bci = bcp - method->code_base();
--- a/src/share/vm/interpreter/bytecodes.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/bytecodes.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,18 +54,46 @@
 Bytecodes::Code Bytecodes::_java_code     [Bytecodes::number_of_codes];
 u_short         Bytecodes::_flags         [(1<<BitsPerByte)*2];
 
+#ifdef ASSERT
+bool Bytecodes::check_method(const methodOopDesc* method, address bcp) {
+  return method->contains(bcp);
+}
+#endif
+
+bool Bytecodes::check_must_rewrite(Bytecodes::Code code) {
+  assert(can_rewrite(code), "post-check only");
+
+  // Some codes are conditionally rewritten.  Look closely at them.
+  switch (code) {
+  case Bytecodes::_aload_0:
+    // Even if RewriteFrequentPairs is turned on,
+    // the _aload_0 code might delay its rewrite until
+    // a following _getfield rewrites itself.
+    return false;
+
+  case Bytecodes::_lookupswitch:
+    return false;  // the rewrite is not done by the interpreter
+
+  case Bytecodes::_new:
+    // (Could actually look at the class here, but the profit would be small.)
+    return false;  // the rewrite is not always done
+  }
+
+  // No other special cases.
+  return true;
+}
 
 Bytecodes::Code Bytecodes::code_at(methodOop method, int bci) {
-  return code_at(method->bcp_from(bci), method);
+  return code_at(method, method->bcp_from(bci));
 }
 
-Bytecodes::Code Bytecodes::non_breakpoint_code_at(address bcp, methodOop method) {
-  if (method == NULL)  method = methodOopDesc::method_from_bcp(bcp);
+Bytecodes::Code Bytecodes::non_breakpoint_code_at(const methodOopDesc* method, address bcp) {
+  assert(method != NULL, "must have the method for breakpoint conversion");
+  assert(method->contains(bcp), "must be valid bcp in method");
   return method->orig_bytecode_at(method->bci_from(bcp));
 }
 
-int Bytecodes::special_length_at(address bcp, address end) {
-  Code code = code_at(bcp);
+int Bytecodes::special_length_at(Bytecodes::Code code, address bcp, address end) {
   switch (code) {
   case _wide:
     if (end != NULL && bcp + 1 >= end) {
@@ -120,7 +148,7 @@
   if (code == _breakpoint) {
     return 1;
   } else {
-    return special_length_at(bcp, end);
+    return special_length_at(code, bcp, end);
   }
 }
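
The must_rewrite() wrapper added to bytecodes.hpp below pairs the cheap can_rewrite() flag test with the conditional cases handled by check_must_rewrite() above. A compressed stand-in of that two-stage test; the opcode set and the flag logic here are placeholders, not the real tables:

    enum Code { _aload_0, _lookupswitch, _new, _getfield };

    static bool can_rewrite(Code) { return true; }   // cheap flag test (stub)

    static bool check_must_rewrite(Code c) {         // conditional special cases
      switch (c) {
        case _aload_0:      return false;  // may wait for a paired _getfield
        case _lookupswitch: return false;  // rewritten outside the interpreter
        case _new:          return false;  // not always rewritten
        default:            return true;
      }
    }

    // Mirrors Bytecodes::must_rewrite(): consult the switch only when the
    // rewrite flag is set at all.
    static bool must_rewrite(Code c) {
      return can_rewrite(c) && check_must_rewrite(c);
    }

    int main() { return (must_rewrite(_getfield) && !must_rewrite(_new)) ? 0 : 1; }
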
 
--- a/src/share/vm/interpreter/bytecodes.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/bytecodes.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -342,6 +342,12 @@
   static void        pd_initialize();              // platform specific initialization
   static Code        pd_base_code_for(Code code);  // platform specific base_code_for implementation
 
+  // Verify that bcp points into method
+#ifdef ASSERT
+  static bool        check_method(const methodOopDesc* method, address bcp);
+#endif
+  static bool check_must_rewrite(Bytecodes::Code bc);
+
  public:
   // Conversion
   static void        check          (Code code)    { assert(is_defined(code), "illegal code"); }
@@ -349,22 +355,30 @@
   static Code        cast           (int  code)    { return (Code)code; }
 
 
-   // Fetch a bytecode, hiding breakpoints as necessary:
-   static Code       code_at(address bcp, methodOop method = NULL) {
-          Code code = cast(*bcp); return (code != _breakpoint) ? code : non_breakpoint_code_at(bcp, method);
-   }
-   static Code       java_code_at(address bcp, methodOop method = NULL) {
-          return java_code(code_at(bcp, method));
-   }
+  // Fetch a bytecode, hiding breakpoints as necessary.  The method
+  // argument is used for conversion of breakpoints into the original
+  // bytecode.  The CI uses these methods but guarantees that
+  // breakpoints are hidden so the method argument should be passed as
+  // NULL since in that case the bcp and methodOop are unrelated
+  // memory.
+  static Code       code_at(const methodOopDesc* method, address bcp) {
+    assert(method == NULL || check_method(method, bcp), "bcp must point into method");
+    Code code = cast(*bcp);
+    assert(code != _breakpoint || method != NULL, "need methodOop to decode breakpoint");
+    return (code != _breakpoint) ? code : non_breakpoint_code_at(method, bcp);
+  }
+  static Code       java_code_at(const methodOopDesc* method, address bcp) {
+    return java_code(code_at(method, bcp));
+  }
 
-   // Fetch a bytecode or a breakpoint:
-   static Code       code_or_bp_at(address bcp)    { return (Code)cast(*bcp); }
+  // Fetch a bytecode or a breakpoint:
+  static Code       code_or_bp_at(address bcp)    { return (Code)cast(*bcp); }
 
-   static Code       code_at(methodOop method, int bci);
-   static bool       is_active_breakpoint_at(address bcp) { return (Code)*bcp == _breakpoint; }
+  static Code       code_at(methodOop method, int bci);
+  static bool       is_active_breakpoint_at(address bcp) { return (Code)*bcp == _breakpoint; }
 
-   // find a bytecode, behind a breakpoint if necessary:
-   static Code       non_breakpoint_code_at(address bcp, methodOop method = NULL);
+  // find a bytecode, behind a breakpoint if necessary:
+  static Code       non_breakpoint_code_at(const methodOopDesc* method, address bcp);
 
   // Bytecode attributes
   static bool        is_defined     (int  code)    { return 0 <= code && code < number_of_codes && flags(code, false) != 0; }
@@ -379,14 +393,17 @@
   static bool        can_trap       (Code code)    { check(code);      return has_all_flags(code, _bc_can_trap, false); }
   static Code        java_code      (Code code)    { check(code);      return _java_code     [code]; }
   static bool        can_rewrite    (Code code)    { check(code);      return has_all_flags(code, _bc_can_rewrite, false); }
+  static bool        must_rewrite(Bytecodes::Code code) { return can_rewrite(code) && check_must_rewrite(code); }
   static bool        native_byte_order(Code code)  { check(code);      return has_all_flags(code, _fmt_has_nbo, false); }
   static bool        uses_cp_cache  (Code code)    { check(code);      return has_all_flags(code, _fmt_has_j, false); }
   // if 'end' is provided, it indicates the end of the code buffer which
   // should not be read past when parsing.
-  static int         special_length_at(address bcp, address end = NULL);
+  static int         special_length_at(Bytecodes::Code code, address bcp, address end = NULL);
+  static int         special_length_at(methodOop method, address bcp, address end = NULL) { return special_length_at(code_at(method, bcp), bcp, end); }
   static int         raw_special_length_at(address bcp, address end = NULL);
-  static int         length_at      (address bcp)  { int l = length_for(code_at(bcp)); return l > 0 ? l : special_length_at(bcp); }
-  static int         java_length_at (address bcp)  { int l = length_for(java_code_at(bcp)); return l > 0 ? l : special_length_at(bcp); }
+  static int         length_for_code_at(Bytecodes::Code code, address bcp)  { int l = length_for(code); return l > 0 ? l : special_length_at(code, bcp); }
+  static int         length_at      (methodOop method, address bcp)  { return length_for_code_at(code_at(method, bcp), bcp); }
+  static int         java_length_at (methodOop method, address bcp)  { return length_for_code_at(java_code_at(method, bcp), bcp); }
   static bool        is_java_code   (Code code)    { return 0 <= code && code < number_of_java_codes; }
 
   static bool        is_aload       (Code code)    { return (code == _aload  || code == _aload_0  || code == _aload_1
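
The central change in this header is that fetching an opcode now takes the methodOopDesc, so a planted _breakpoint (0xCA) can be mapped back to the original bytecode, as the new asserts spell out. A standalone mimic of that decoding path, with hypothetical stand-in types:

    #include <cassert>
    #include <cstdint>
    #include <map>

    enum Code : uint8_t { _iload = 0x15, _breakpoint = 0xCA };

    struct Method {                              // stand-in for methodOopDesc
      std::map<int, Code> orig;                  // originals saved by JVMTI
      Code orig_bytecode_at(int bci) const { return orig.at(bci); }
    };

    static Code code_at(const Method* m, const uint8_t* code, int bci) {
      Code c = (Code)code[bci];
      assert(c != _breakpoint || m != nullptr);  // need the method to decode
      return (c != _breakpoint) ? c : m->orig_bytecode_at(bci);
    }

    int main() {
      const uint8_t code[] = { 0xCA };           // breakpoint planted at bci 0
      Method m; m.orig[0] = _iload;              // original opcode saved aside
      return code_at(&m, code, 0) == _iload ? 0 : 1;
    }
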
--- a/src/share/vm/interpreter/interpreter.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/interpreter.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -237,10 +237,9 @@
 // Return true if the interpreter can prove that the given bytecode has
 // not yet been executed (in Java semantics, not in actual operation).
 bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) {
-  address bcp = method->bcp_from(bci);
-  Bytecodes::Code code = Bytecodes::code_at(bcp, method());
+  Bytecodes::Code code = method()->code_at(bci);
 
-  if (!Bytecode_at(bcp)->must_rewrite(code)) {
+  if (!Bytecodes::must_rewrite(code)) {
     // might have been reached
     return false;
   }
@@ -286,12 +285,12 @@
 // If deoptimization happens, this function returns the point of next bytecode to continue execution
 address AbstractInterpreter::deopt_continue_after_entry(methodOop method, address bcp, int callee_parameters, bool is_top_frame) {
   assert(method->contains(bcp), "just checkin'");
-  Bytecodes::Code code   = Bytecodes::java_code_at(bcp);
+  Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
   assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
   int             bci    = method->bci_from(bcp);
   int             length = -1; // initial value for debugging
   // compute continuation length
-  length = Bytecodes::length_at(bcp);
+  length = Bytecodes::length_at(method, bcp);
   // compute result type
   BasicType type = T_ILLEGAL;
 
@@ -303,7 +302,7 @@
       Thread *thread = Thread::current();
       ResourceMark rm(thread);
       methodHandle mh(thread, method);
-      type = Bytecode_invoke_at(mh, bci)->result_type(thread);
+      type = Bytecode_invoke(mh, bci).result_type(thread);
       // since the cache entry might not be initialized:
       // (NOT needed for the old calling convention)
       if (!is_top_frame) {
@@ -317,7 +316,7 @@
       Thread *thread = Thread::current();
       ResourceMark rm(thread);
       methodHandle mh(thread, method);
-      type = Bytecode_invoke_at(mh, bci)->result_type(thread);
+      type = Bytecode_invoke(mh, bci).result_type(thread);
       // since the cache entry might not be initialized:
       // (NOT needed for the old calling convention)
       if (!is_top_frame) {
@@ -334,7 +333,7 @@
         Thread *thread = Thread::current();
         ResourceMark rm(thread);
         methodHandle mh(thread, method);
-        type = Bytecode_loadconstant_at(mh, bci)->result_type();
+        type = Bytecode_loadconstant(mh, bci).result_type();
         break;
       }
 
@@ -356,7 +355,7 @@
 //       Interpreter::deopt_entry(vtos, 0) like others
 address AbstractInterpreter::deopt_reexecute_entry(methodOop method, address bcp) {
   assert(method->contains(bcp), "just checkin'");
-  Bytecodes::Code code   = Bytecodes::java_code_at(bcp);
+  Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
 #ifdef COMPILER1
   if(code == Bytecodes::_athrow ) {
     return Interpreter::rethrow_exception_entry();
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -132,9 +132,9 @@
          bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
   ResourceMark rm(thread);
   methodHandle m (thread, method(thread));
-  Bytecode_loadconstant* ldc = Bytecode_loadconstant_at(m, bci(thread));
-  oop result = ldc->resolve_constant(THREAD);
-  DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc->cache_index()));
+  Bytecode_loadconstant ldc(m, bci(thread));
+  oop result = ldc.resolve_constant(THREAD);
+  DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc.cache_index()));
   assert(result == cpce->f1(), "expected result for assembly code");
 }
 IRT_END
@@ -672,8 +672,8 @@
   if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) {
     ResourceMark rm(thread);
     methodHandle m (thread, method(thread));
-    Bytecode_invoke* call = Bytecode_invoke_at(m, bci(thread));
-    symbolHandle signature (thread, call->signature());
+    Bytecode_invoke call(m, bci(thread));
+    symbolHandle signature (thread, call.signature());
     receiver = Handle(thread,
                   thread->last_frame().interpreter_callee_receiver(signature));
     assert(Universe::heap()->is_in_reserved_or_null(receiver()),
@@ -756,7 +756,7 @@
     caller_bci = caller_method->bci_from(caller_bcp);
     site_index = Bytes::get_native_u4(caller_bcp+1);
   }
-  assert(site_index == InterpreterRuntime::bytecode(thread)->get_index_u4(bytecode), "");
+  assert(site_index == InterpreterRuntime::bytecode(thread).get_index_u4(bytecode), "");
   assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
   // there is a second CPC entry that is of interest; it caches signature info:
   int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
@@ -884,7 +884,7 @@
   return mdo->bci_to_di(bci);
 IRT_END
 
-IRT_ENTRY(jint, InterpreterRuntime::profile_method(JavaThread* thread, address cur_bcp))
+IRT_ENTRY(void, InterpreterRuntime::profile_method(JavaThread* thread))
   // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
   // flag, in case this method triggers classloading which will call into Java.
   UnlockFlagSaver fs(thread);
@@ -893,16 +893,12 @@
   frame fr = thread->last_frame();
   assert(fr.is_interpreted_frame(), "must come from interpreter");
   methodHandle method(thread, fr.interpreter_frame_method());
-  int bci = method->bci_from(cur_bcp);
   methodOopDesc::build_interpreter_method_data(method, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
     CLEAR_PENDING_EXCEPTION;
     // and fall through...
   }
-  methodDataOop mdo = method->method_data();
-  if (mdo == NULL)  return 0;
-  return mdo->bci_to_di(bci);
 IRT_END
 
 
@@ -1245,9 +1241,9 @@
   assert(fr.is_interpreted_frame(), "");
   jint bci = fr.interpreter_frame_bci();
   methodHandle mh(thread, fr.interpreter_frame_method());
-  Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
-  ArgumentSizeComputer asc(invoke->signature());
-  int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver
+  Bytecode_invoke invoke(mh, bci);
+  ArgumentSizeComputer asc(invoke.signature());
+  int size_of_arguments = (asc.size() + (invoke.has_receiver() ? 1 : 0)); // receiver
   Copy::conjoint_jbytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize);
 IRT_END
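
For context on the size_of_arguments computation above: ArgumentSizeComputer walks the signature obtained from the invoke bytecode and counts operand slots, where long and double take two and the receiver of a non-static call adds one. A standalone sketch of that counting rule; the parser below is a simplification, not HotSpot's SignatureIterator:

    #include <cstddef>
    #include <string>

    static size_t arg_slots(const std::string& sig, bool has_receiver) {
      size_t slots = has_receiver ? 1 : 0;
      for (size_t i = 1; i < sig.size() && sig[i] != ')'; ++i) {
        switch (sig[i]) {
          case 'J': case 'D': slots += 2; break;  // long/double take two slots
          case 'L': slots += 1;                   // object reference: one slot
                    while (sig[i] != ';') ++i;
                    break;
          case '[': slots += 1;                   // array reference: one slot
                    while (sig[i] == '[') ++i;
                    if (sig[i] == 'L') while (sig[i] != ';') ++i;
                    break;
          default:  slots += 1; break;            // int, float, short, ...
        }
      }
      return slots;
    }

    int main() {
      // receiver + int + long + String reference = 1 + 1 + 2 + 1 = 5 slots
      return arg_slots("(IJLjava/lang/String;)V", true) == 5 ? 0 : 1;
    }
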
--- a/src/share/vm/interpreter/interpreterRuntime.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,16 +58,16 @@
   static void      set_bcp_and_mdp(address bcp, JavaThread*thread);
   static Bytecodes::Code code(JavaThread *thread)    {
     // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
-    return Bytecodes::code_at(bcp(thread), method(thread));
+    return Bytecodes::code_at(method(thread), bcp(thread));
   }
   static bool      already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
-  static Bytecode* bytecode(JavaThread *thread)      { return Bytecode_at(bcp(thread)); }
+  static Bytecode  bytecode(JavaThread *thread)      { return Bytecode(method(thread), bcp(thread)); }
   static int       get_index_u1(JavaThread *thread, Bytecodes::Code bc)
-                                                        { return bytecode(thread)->get_index_u1(bc); }
+                                                        { return bytecode(thread).get_index_u1(bc); }
   static int       get_index_u2(JavaThread *thread, Bytecodes::Code bc)
-                                                        { return bytecode(thread)->get_index_u2(bc); }
+                                                        { return bytecode(thread).get_index_u2(bc); }
   static int       get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc)
-                                                        { return bytecode(thread)->get_index_u2_cpcache(bc); }
+                                                        { return bytecode(thread).get_index_u2_cpcache(bc); }
   static int       number_of_dimensions(JavaThread *thread)  { return bcp(thread)[3]; }
 
   static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i)  { return method(thread)->constants()->cache()->entry_at(i); }
@@ -164,7 +164,7 @@
 
   // Interpreter profiling support
   static jint    bcp_to_di(methodOopDesc* method, address cur_bcp);
-  static jint    profile_method(JavaThread* thread, address cur_bcp);
+  static void    profile_method(JavaThread* thread);
   static void    update_mdp_for_ret(JavaThread* thread, int bci);
 #ifdef ASSERT
   static void    verify_mdp(methodOopDesc* method, address bcp, address mdp);
--- a/src/share/vm/interpreter/rewriter.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/rewriter.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -221,7 +221,7 @@
       // call to calculate the length.
       bc_length = Bytecodes::length_for(c);
       if (bc_length == 0) {
-        bc_length = Bytecodes::length_at(bcp);
+        bc_length = Bytecodes::length_at(method, bcp);
 
         // length_at will put us at the bytecode after the one modified
         // by 'wide'. We don't currently examine any of the bytecodes
@@ -237,9 +237,9 @@
       switch (c) {
         case Bytecodes::_lookupswitch   : {
 #ifndef CC_INTERP
-          Bytecode_lookupswitch* bc = Bytecode_lookupswitch_at(bcp);
+          Bytecode_lookupswitch bc(method, bcp);
           (*bcp) = (
-            bc->number_of_pairs() < BinarySwitchThreshold
+            bc.number_of_pairs() < BinarySwitchThreshold
             ? Bytecodes::_fast_linearswitch
             : Bytecodes::_fast_binaryswitch
           );
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -592,7 +592,7 @@
 //       that do not return "Interpreter::deopt_entry(vtos, 0)"
 address TemplateInterpreter::deopt_reexecute_entry(methodOop method, address bcp) {
   assert(method->contains(bcp), "just checkin'");
-  Bytecodes::Code code   = Bytecodes::java_code_at(bcp);
+  Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
   if (code == Bytecodes::_return) {
     // This is used for deopt during registration of finalizers
     // during Object.<init>.  We simply need to resume execution at
--- a/src/share/vm/memory/defNewGeneration.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/memory/defNewGeneration.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -684,23 +684,28 @@
   _preserved_marks_of_objs.clear(true);
 }
 
+void DefNewGeneration::preserve_mark(oop obj, markOop m) {
+  assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj),
+         "Oversaving!");
+  _objs_with_preserved_marks.push(obj);
+  _preserved_marks_of_objs.push(m);
+}
+
 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
   if (m->must_be_preserved_for_promotion_failure(obj)) {
-    _objs_with_preserved_marks.push(obj);
-    _preserved_marks_of_objs.push(m);
+    preserve_mark(obj, m);
   }
 }
 
 void DefNewGeneration::handle_promotion_failure(oop old) {
-  preserve_mark_if_necessary(old, old->mark());
-  if (!_promotion_failed && PrintPromotionFailure) {
+  if (PrintPromotionFailure && !_promotion_failed) {
     gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                         old->size());
   }
-
+  _promotion_failed = true;
+  preserve_mark_if_necessary(old, old->mark());
   // forward to self
   old->forward_to(old);
-  _promotion_failed = true;
 
   _promo_failure_scan_stack.push(old);
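
The reordering in handle_promotion_failure() is load-bearing: the new preserve_mark() asserts promotion_failed(), so _promotion_failed must be set before preserve_mark_if_necessary() runs for the first failing object. A compressed stand-in of that control flow:

    #include <cassert>

    struct GenMimic {
      bool _promotion_failed = false;
      bool promotion_failed() const { return _promotion_failed; }
      void preserve_mark() { assert(promotion_failed() && "Oversaving!"); }
      void handle_promotion_failure() {
        _promotion_failed = true;   // must precede the preserve call
        preserve_mark();
      }
    };

    int main() {
      GenMimic g;
      g.handle_promotion_failure();  // assert holds because of the ordering
      return 0;
    }
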
 
--- a/src/share/vm/memory/defNewGeneration.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/memory/defNewGeneration.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -85,6 +85,7 @@
   // Preserve the mark of "obj", if necessary, in preparation for its mark
   // word being overwritten with a self-forwarding-pointer.
   void   preserve_mark_if_necessary(oop obj, markOop m);
+  void   preserve_mark(oop obj, markOop m);    // work routine used by the above
 
   // Together, these keep <object with a preserved mark, mark value> pairs.
   // They should always contain the same number of elements.
--- a/src/share/vm/memory/genMarkSweep.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/memory/genMarkSweep.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -158,6 +158,7 @@
 
   Threads::gc_epilogue();
   CodeCache::gc_epilogue();
+  JvmtiExport::gc_epilogue();
 
   if (PrintGC && !PrintGCDetails) {
     gch->print_heap_change(gch_prev_used);
--- a/src/share/vm/memory/threadLocalAllocBuffer.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/memory/threadLocalAllocBuffer.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,11 @@
 void ThreadLocalAllocBuffer::make_parsable(bool retire) {
   if (end() != NULL) {
     invariants();
+
+    if (retire) {
+      myThread()->incr_allocated_bytes(used_bytes());
+    }
+
     CollectedHeap::fill_with_object(top(), hard_end(), retire);
 
     if (retire || ZeroTLAB) {  // "Reset" the TLAB
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,6 +112,8 @@
   HeapWord* top() const                          { return _top; }
   HeapWord* pf_top() const                       { return _pf_top; }
   size_t desired_size() const                    { return _desired_size; }
+  size_t used() const                            { return pointer_delta(top(), start()); }
+  size_t used_bytes() const                      { return pointer_delta(top(), start(), 1); }
   size_t free() const                            { return pointer_delta(end(), top()); }
   // Don't discard tlab if remaining space is larger than this.
   size_t refill_waste_limit() const              { return _refill_waste_limit; }
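
used() and used_bytes() differ only in the scale passed to pointer_delta: word-sized elements versus a unit of one byte. With make_parsable() now crediting used_bytes() to the owning thread when a TLAB is retired, the arithmetic reduces to the following freestanding analogue of pointer_delta:

    #include <cstddef>

    // pointer_delta-style helper: distance between two pointers measured in
    // units of element_size bytes (element_size 1 yields plain bytes).
    static size_t delta(const char* left, const char* right, size_t element_size) {
      return (size_t)(left - right) / element_size;
    }

    int main() {
      char tlab[64];
      char* start = tlab;
      char* top   = tlab + 24;                      // 24 bytes allocated so far
      size_t used_words = delta(top, start, sizeof(void*));   // like used()
      size_t used_bytes = delta(top, start, 1);                // like used_bytes()
      return (used_bytes == 24 && used_words == 24 / sizeof(void*)) ? 0 : 1;
    }
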
--- a/src/share/vm/oops/arrayKlass.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/arrayKlass.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,8 @@
   friend class VMStructs;
  private:
   int      _dimension;         // This is n'th-dimensional array.
-  klassOop _higher_dimension;  // Refers to the (n+1)'th-dimensional array (if present).
-  klassOop _lower_dimension;   // Refers to the (n-1)'th-dimensional array (if present).
+  volatile klassOop _higher_dimension;  // Refers to the (n+1)'th-dimensional array (if present).
+  volatile klassOop _lower_dimension;   // Refers to the (n-1)'th-dimensional array (if present).
   int      _vtable_len;        // size of vtable for this klass
   juint    _alloc_size;        // allocation profiling support
   oop      _component_mirror;  // component type, as a java/lang/Class
--- a/src/share/vm/oops/generateOopMap.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/generateOopMap.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -535,23 +535,23 @@
       (*jmpFct)(this, bcs->dest_w(), data);
       break;
     case Bytecodes::_tableswitch:
-      { Bytecode_tableswitch *tableswitch = Bytecode_tableswitch_at(bcs->bcp());
-        int len = tableswitch->length();
+      { Bytecode_tableswitch tableswitch(method(), bcs->bcp());
+        int len = tableswitch.length();
 
-        (*jmpFct)(this, bci + tableswitch->default_offset(), data); /* Default. jump address */
+        (*jmpFct)(this, bci + tableswitch.default_offset(), data); /* Default. jump address */
         while (--len >= 0) {
-          (*jmpFct)(this, bci + tableswitch->dest_offset_at(len), data);
+          (*jmpFct)(this, bci + tableswitch.dest_offset_at(len), data);
         }
         break;
       }
 
     case Bytecodes::_lookupswitch:
-      { Bytecode_lookupswitch *lookupswitch = Bytecode_lookupswitch_at(bcs->bcp());
-        int npairs = lookupswitch->number_of_pairs();
-        (*jmpFct)(this, bci + lookupswitch->default_offset(), data); /* Default. */
+      { Bytecode_lookupswitch lookupswitch(method(), bcs->bcp());
+        int npairs = lookupswitch.number_of_pairs();
+        (*jmpFct)(this, bci + lookupswitch.default_offset(), data); /* Default. */
         while(--npairs >= 0) {
-          LookupswitchPair *pair = lookupswitch->pair_at(npairs);
-          (*jmpFct)(this, bci + pair->offset(), data);
+          LookupswitchPair pair = lookupswitch.pair_at(npairs);
+          (*jmpFct)(this, bci + pair.offset(), data);
         }
         break;
       }
@@ -977,7 +977,7 @@
 #ifdef ASSERT
     if (blockNum + 1 < bbNo) {
       address bcp = _method->bcp_from(bb->_end_bci);
-      int bc_len = Bytecodes::java_length_at(bcp);
+      int bc_len = Bytecodes::java_length_at(_method(), bcp);
       assert(bb->_end_bci + bc_len == bb[1]._bci, "unmatched bci info in basicblock");
     }
 #endif
@@ -985,7 +985,7 @@
 #ifdef ASSERT
   { BasicBlock *bb = &_basic_blocks[bbNo-1];
     address bcp = _method->bcp_from(bb->_end_bci);
-    int bc_len = Bytecodes::java_length_at(bcp);
+    int bc_len = Bytecodes::java_length_at(_method(), bcp);
     assert(bb->_end_bci + bc_len == _method->code_size(), "wrong end bci");
   }
 #endif
@@ -1837,14 +1837,14 @@
 
 
 void GenerateOopMap::do_ldc(int bci) {
-  Bytecode_loadconstant* ldc = Bytecode_loadconstant_at(method(), bci);
+  Bytecode_loadconstant ldc(method(), bci);
   constantPoolOop cp  = method()->constants();
-  BasicType       bt  = ldc->result_type();
+  BasicType       bt  = ldc.result_type();
   CellTypeState   cts = (bt == T_OBJECT) ? CellTypeState::make_line_ref(bci) : valCTS;
   // Make sure bt==T_OBJECT is the same as old code (is_pointer_entry).
   // Note that CONSTANT_MethodHandle entries are u2 index pairs, not pointer-entries,
   // and they are processed by _fast_aldc and the CP cache.
-  assert((ldc->has_cache_index() || cp->is_pointer_entry(ldc->pool_index()))
+  assert((ldc.has_cache_index() || cp->is_pointer_entry(ldc.pool_index()))
          ? (bt == T_OBJECT) : true, "expected object type");
   ppush1(cts);
 }
@@ -2343,7 +2343,7 @@
 bool GenerateOopMap::rewrite_load_or_store(BytecodeStream *bcs, Bytecodes::Code bcN, Bytecodes::Code bc0, unsigned int varNo) {
   assert(bcN == Bytecodes::_astore   || bcN == Bytecodes::_aload,   "wrong argument (bcN)");
   assert(bc0 == Bytecodes::_astore_0 || bc0 == Bytecodes::_aload_0, "wrong argument (bc0)");
-  int ilen = Bytecodes::length_at(bcs->bcp());
+  int ilen = Bytecodes::length_at(_method(), bcs->bcp());
   int newIlen;
 
   if (ilen == 4) {
--- a/src/share/vm/oops/instanceRefKlass.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -457,6 +457,12 @@
   }
 }
 
+bool instanceRefKlass::owns_pending_list_lock(JavaThread* thread) {
+  if (java_lang_ref_Reference::pending_list_lock() == NULL) return false;
+  Handle h_lock(thread, java_lang_ref_Reference::pending_list_lock());
+  return ObjectSynchronizer::current_thread_holds_lock(thread, h_lock);
+}
+
 void instanceRefKlass::acquire_pending_list_lock(BasicLock *pending_list_basic_lock) {
   // we may enter this with pending exception set
   PRESERVE_EXCEPTION_MARK;  // exceptions are never thrown, needed for TRAPS argument
--- a/src/share/vm/oops/instanceRefKlass.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/instanceRefKlass.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -89,6 +89,7 @@
 
   static void release_and_notify_pending_list_lock(BasicLock *pending_list_basic_lock);
   static void acquire_pending_list_lock(BasicLock *pending_list_basic_lock);
+  static bool owns_pending_list_lock(JavaThread* thread);
 
   // Update non-static oop maps so 'referent', 'nextPending' and
   // 'discovered' will look like non-oops
--- a/src/share/vm/oops/klassVtable.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/klassVtable.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -518,18 +518,21 @@
 bool klassVtable::is_miranda(methodOop m, objArrayOop class_methods, klassOop super) {
   symbolOop name = m->name();
   symbolOop signature = m->signature();
+
   if (instanceKlass::find_method(class_methods, name, signature) == NULL) {
-     // did not find it in the method table of the current class
+    // did not find it in the method table of the current class
     if (super == NULL) {
       // super doesn't exist
       return true;
-    } else {
-      if (instanceKlass::cast(super)->lookup_method(name, signature) == NULL) {
-        // super class hierarchy does not implement it
-        return true;
-      }
+    }
+
+    methodOop mo = instanceKlass::cast(super)->lookup_method(name, signature);
+    if (mo == NULL || mo->access_flags().is_private()) {
+      // super class hierarchy does not implement it or protection is different
+      return true;
     }
   }
+
   return false;
 }
 
--- a/src/share/vm/oops/markOop.inline.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/markOop.inline.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -30,7 +30,7 @@
 #include "oops/markOop.hpp"
 #include "runtime/globals.hpp"
 
-// Should this header be preserved during GC?
+// Should this header be preserved during GC (when biased locking is enabled)?
 inline bool markOopDesc::must_be_preserved_with_bias(oop obj_containing_mark) const {
   assert(UseBiasedLocking, "unexpected");
   if (has_bias_pattern()) {
@@ -47,14 +47,15 @@
   return (!is_unlocked() || !has_no_hash());
 }
 
+// Should this header be preserved during GC?
 inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
   if (!UseBiasedLocking)
     return (!is_unlocked() || !has_no_hash());
   return must_be_preserved_with_bias(obj_containing_mark);
 }
 
-// Should this header (including its age bits) be preserved in the
-// case of a promotion failure during scavenge?
+// Should this header be preserved in the case of a promotion failure
+// during scavenge (when biased locking is enabled)?
 inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
   assert(UseBiasedLocking, "unexpected");
   // We don't explicitly save off the mark words of biased and
@@ -70,18 +71,20 @@
       prototype_for_object(obj_containing_mark)->has_bias_pattern()) {
     return true;
   }
-  return (this != prototype());
+  return (!is_unlocked() || !has_no_hash());
 }
 
+// Should this header be preserved in the case of a promotion failure
+// during scavenge?
 inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
   if (!UseBiasedLocking)
-    return (this != prototype());
+    return (!is_unlocked() || !has_no_hash());
   return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
 }
 
 
-// Should this header (including its age bits) be preserved in the
-// case of a scavenge in which CMS is the old generation?
+// Same as must_be_preserved_with_bias_for_promotion_failure() except that
+// it takes a klassOop argument, instead of the object of which this is the mark word.
 inline bool markOopDesc::must_be_preserved_with_bias_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const {
   assert(UseBiasedLocking, "unexpected");
   // CMS scavenges preserve mark words in similar fashion to promotion failures; see above
@@ -89,11 +92,14 @@
       klass_of_obj_containing_mark->klass_part()->prototype_header()->has_bias_pattern()) {
     return true;
   }
-  return (this != prototype());
+  return (!is_unlocked() || !has_no_hash());
 }
+
+// Same as must_be_preserved_for_promotion_failure() except that
+// it takes a klassOop argument, instead of the object of which this is the mark word.
 inline bool markOopDesc::must_be_preserved_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const {
   if (!UseBiasedLocking)
-    return (this != prototype());
+    return (!is_unlocked() || !has_no_hash());
   return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
 }
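
The behavioral change in this file: the non-biased test used to be "anything different from the prototype header" (which swept in headers whose only deviation was age bits), and is now "locked or hashed", matching the reworded comments. A small stand-in making the difference concrete; the three-field Mark below is a hypothetical model, not the real mark-word encoding:

    struct Mark {
      unsigned age; bool locked; bool hashed;
      bool is_unlocked() const  { return !locked; }
      bool has_no_hash() const  { return !hashed; }
      bool is_prototype() const { return age == 0 && !locked && !hashed; }
    };

    // Old test: any deviation from the prototype (age bits included) was saved.
    static bool must_preserve_old(const Mark& m) { return !m.is_prototype(); }

    // New test: only locked or hashed headers are saved off explicitly.
    static bool must_preserve_new(const Mark& m) {
      return !m.is_unlocked() || !m.has_no_hash();
    }

    int main() {
      Mark aged = { 3, false, false };  // age bits set, neither locked nor hashed
      return (must_preserve_old(aged) && !must_preserve_new(aged)) ? 0 : 1;
    }
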
 
--- a/src/share/vm/oops/methodDataOop.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/methodDataOop.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -417,11 +417,11 @@
 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
   int cell_count = 0;
   if (stream->code() == Bytecodes::_tableswitch) {
-    Bytecode_tableswitch* sw = Bytecode_tableswitch_at(stream->bcp());
-    cell_count = 1 + per_case_cell_count * (1 + sw->length()); // 1 for default
+    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
+    cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
   } else {
-    Bytecode_lookupswitch* sw = Bytecode_lookupswitch_at(stream->bcp());
-    cell_count = 1 + per_case_cell_count * (sw->number_of_pairs() + 1); // 1 for default
+    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
+    cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
   }
   return cell_count;
 }
@@ -434,35 +434,35 @@
   int target_di;
   int offset;
   if (stream->code() == Bytecodes::_tableswitch) {
-    Bytecode_tableswitch* sw = Bytecode_tableswitch_at(stream->bcp());
-    int len = sw->length();
+    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
+    int len = sw.length();
     assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
     for (int count = 0; count < len; count++) {
-      target = sw->dest_offset_at(count) + bci();
+      target = sw.dest_offset_at(count) + bci();
       my_di = mdo->dp_to_di(dp());
       target_di = mdo->bci_to_di(target);
       offset = target_di - my_di;
       set_displacement_at(count, offset);
     }
-    target = sw->default_offset() + bci();
+    target = sw.default_offset() + bci();
     my_di = mdo->dp_to_di(dp());
     target_di = mdo->bci_to_di(target);
     offset = target_di - my_di;
     set_default_displacement(offset);
 
   } else {
-    Bytecode_lookupswitch* sw = Bytecode_lookupswitch_at(stream->bcp());
-    int npairs = sw->number_of_pairs();
+    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
+    int npairs = sw.number_of_pairs();
     assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
     for (int count = 0; count < npairs; count++) {
-      LookupswitchPair *pair = sw->pair_at(count);
-      target = pair->offset() + bci();
+      LookupswitchPair pair = sw.pair_at(count);
+      target = pair.offset() + bci();
       my_di = mdo->dp_to_di(dp());
       target_di = mdo->bci_to_di(target);
       offset = target_di - my_di;
       set_displacement_at(count, offset);
     }
-    target = sw->default_offset() + bci();
+    target = sw.default_offset() + bci();
     my_di = mdo->dp_to_di(dp());
     target_di = mdo->bci_to_di(target);
     offset = target_di - my_di;
--- a/src/share/vm/oops/methodOop.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/methodOop.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -150,17 +150,6 @@
   return -1;
 }
 
-methodOop methodOopDesc::method_from_bcp(address bcp) {
-  debug_only(static int count = 0; count++);
-  assert(Universe::heap()->is_in_permanent(bcp), "bcp not in perm_gen");
-  // TO DO: this may be unsafe in some configurations
-  HeapWord* p = Universe::heap()->block_start(bcp);
-  assert(Universe::heap()->block_is_obj(p), "must be obj");
-  assert(oop(p)->is_constMethod(), "not a method");
-  return constMethodOop(p)->method();
-}
-
-
 void methodOopDesc::mask_for(int bci, InterpreterOopMap* mask) {
 
   Thread* myThread    = Thread::current();
@@ -309,6 +298,12 @@
 // Build a methodDataOop object to hold information about this method
 // collected in the interpreter.
 void methodOopDesc::build_interpreter_method_data(methodHandle method, TRAPS) {
+  // Do not profile the method if the current thread holds the pending list
+  // lock; this avoids a deadlock when acquiring the MethodData_lock below.
+  if (instanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) {
+    return;
+  }
+
   // Grab a lock here to prevent multiple
   // methodDataOops from being created.
   MutexLocker ml(MethodData_lock, THREAD);
@@ -463,11 +458,10 @@
 bool methodOopDesc::is_accessor() const {
   if (code_size() != 5) return false;
   if (size_of_parameters() != 1) return false;
-  methodOop m = (methodOop)this;  // pass to code_at() to avoid method_from_bcp
-  if (Bytecodes::java_code_at(code_base()+0, m) != Bytecodes::_aload_0 ) return false;
-  if (Bytecodes::java_code_at(code_base()+1, m) != Bytecodes::_getfield) return false;
-  if (Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_areturn &&
-      Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_ireturn ) return false;
+  if (java_code_at(0) != Bytecodes::_aload_0 ) return false;
+  if (java_code_at(1) != Bytecodes::_getfield) return false;
+  if (java_code_at(4) != Bytecodes::_areturn &&
+      java_code_at(4) != Bytecodes::_ireturn ) return false;
   return true;
 }
 
@@ -1408,7 +1402,7 @@
 }
 
 
-Bytecodes::Code methodOopDesc::orig_bytecode_at(int bci) {
+Bytecodes::Code methodOopDesc::orig_bytecode_at(int bci) const {
   BreakpointInfo* bp = instanceKlass::cast(method_holder())->breakpoints();
   for (; bp != NULL; bp = bp->next()) {
     if (bp->match(this, bci)) {
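
The guard added to build_interpreter_method_data() above sidesteps a lock-ordering problem: a thread that already holds the reference pending-list lock must not block on MethodData_lock, so profiling is simply skipped for it (owns_pending_list_lock() is the query added to instanceRefKlass earlier in this changeset). A toy version of the guard:

    struct ThreadMimic { bool owns_pending_list_lock; };

    static bool try_build_method_data(const ThreadMimic* t) {
      // Skip profiling rather than risk a deadlock acquiring MethodData_lock
      // while the pending-list lock is held; the caller just continues
      // without a methodDataOop.
      if (t->owns_pending_list_lock) return false;
      // ... acquire MethodData_lock and allocate the profiling data here ...
      return true;
    }

    int main() {
      ThreadMimic ref_handler = { true  };
      ThreadMimic mutator     = { false };
      return (!try_build_method_data(&ref_handler) &&
               try_build_method_data(&mutator)) ? 0 : 1;
    }
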
--- a/src/share/vm/oops/methodOop.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/methodOop.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -196,8 +196,15 @@
   static char* name_and_sig_as_C_string(Klass* klass, symbolOop method_name, symbolOop signature);
   static char* name_and_sig_as_C_string(Klass* klass, symbolOop method_name, symbolOop signature, char* buf, int size);
 
+  Bytecodes::Code java_code_at(int bci) const {
+    return Bytecodes::java_code_at(this, bcp_from(bci));
+  }
+  Bytecodes::Code code_at(int bci) const {
+    return Bytecodes::code_at(this, bcp_from(bci));
+  }
+
   // JVMTI breakpoints
-  Bytecodes::Code orig_bytecode_at(int bci);
+  Bytecodes::Code orig_bytecode_at(int bci) const;
   void        set_orig_bytecode_at(int bci, Bytecodes::Code code);
   void set_breakpoint(int bci);
   void clear_breakpoint(int bci);
@@ -655,8 +662,6 @@
   void set_queued_for_compilation()    { _access_flags.set_queued_for_compilation();     }
   void clear_queued_for_compilation()  { _access_flags.clear_queued_for_compilation();   }
 
-  static methodOop method_from_bcp(address bcp);
-
   // Resolve all classes in signature, return 'true' if successful
   static bool load_signature_classes(methodHandle m, TRAPS);
 
@@ -787,11 +792,11 @@
   void                 set_next(BreakpointInfo* n)    { _next = n; }
 
   // helps for searchers
-  bool match(methodOop m, int bci) {
+  bool match(const methodOopDesc* m, int bci) {
     return bci == _bci && match(m);
   }
 
-  bool match(methodOop m) {
+  bool match(const methodOopDesc* m) {
     return _name_index == m->name_index() &&
       _signature_index == m->signature_index();
   }
--- a/src/share/vm/oops/objArrayKlass.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/objArrayKlass.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -235,8 +235,9 @@
           objArrayKlassKlass::cast(Universe::objArrayKlassKlassObj())->
           allocate_objArray_klass(dimension + 1, this_oop, CHECK_NULL);
         ak = objArrayKlassHandle(THREAD, new_klass);
+        ak->set_lower_dimension(this_oop());
+        OrderAccess::storestore();
         this_oop->set_higher_dimension(ak());
-        ak->set_lower_dimension(this_oop());
         assert(ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");
       }
     }
--- a/src/share/vm/oops/typeArrayKlass.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -179,6 +179,7 @@
           dimension + 1, h_this, CHECK_NULL);
         h_ak = objArrayKlassHandle(THREAD, oak);
         h_ak->set_lower_dimension(h_this());
+        OrderAccess::storestore();
         h_this->set_higher_dimension(h_ak());
         assert(h_ak->oop_is_objArray(), "incorrect initialization of objArrayKlass");
       }
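
Both array-klass hunks adopt the same safe-publication pattern: fully initialize the new klass (set_lower_dimension) first, then publish it through set_higher_dimension, with OrderAccess::storestore() between the two and the fields made volatile in arrayKlass.hpp above. Readers that observe a non-NULL higher dimension are then guaranteed to see the lower-dimension link. A portable C++ analogue using release/acquire on stand-in types:

    #include <atomic>

    struct KlassMimic {
      KlassMimic* lower_dimension = nullptr;
      std::atomic<KlassMimic*> higher_dimension{nullptr};
    };

    static void publish_higher_dimension(KlassMimic* self, KlassMimic* higher) {
      higher->lower_dimension = self;   // initialize first...
      // ...then publish; the release store plays the role of the
      // storestore() barrier before set_higher_dimension().
      self->higher_dimension.store(higher, std::memory_order_release);
    }

    int main() {
      KlassMimic dim1, dim2;
      publish_higher_dimension(&dim1, &dim2);
      KlassMimic* seen = dim1.higher_dimension.load(std::memory_order_acquire);
      return (seen == &dim2 && seen->lower_dimension == &dim1) ? 0 : 1;
    }
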
--- a/src/share/vm/opto/macro.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/opto/macro.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1158,7 +1158,7 @@
     // Note: We set the control input on "eden_end" and "old_eden_top" when using
     //       a TLAB to work around a bug where these values were being moved across
     //       a safepoint.  These are not oops, so they cannot be included in the oop
-    //       map, but the can be changed by a GC.   The proper way to fix this would
+    //       map, but they can be changed by a GC.   The proper way to fix this would
     //       be to set the raw memory state when generating a  SafepointNode.  However
     //       this will require extensive changes to the loop optimization in order to
     //       prevent a degradation of the optimization.
@@ -1167,24 +1167,24 @@
 
     // allocate the Region and Phi nodes for the result
     result_region = new (C, 3) RegionNode(3);
-    result_phi_rawmem = new (C, 3) PhiNode( result_region, Type::MEMORY, TypeRawPtr::BOTTOM );
-    result_phi_rawoop = new (C, 3) PhiNode( result_region, TypeRawPtr::BOTTOM );
-    result_phi_i_o    = new (C, 3) PhiNode( result_region, Type::ABIO ); // I/O is used for Prefetch
+    result_phi_rawmem = new (C, 3) PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
+    result_phi_rawoop = new (C, 3) PhiNode(result_region, TypeRawPtr::BOTTOM);
+    result_phi_i_o    = new (C, 3) PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
 
     // We need a Region for the loop-back contended case.
     enum { fall_in_path = 1, contended_loopback_path = 2 };
     Node *contended_region;
     Node *contended_phi_rawmem;
-    if( UseTLAB ) {
+    if (UseTLAB) {
       contended_region = toobig_false;
       contended_phi_rawmem = mem;
     } else {
       contended_region = new (C, 3) RegionNode(3);
-      contended_phi_rawmem = new (C, 3) PhiNode( contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
+      contended_phi_rawmem = new (C, 3) PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
       // Now handle the passing-too-big test.  We fall into the contended
       // loop-back merge point.
-      contended_region    ->init_req( fall_in_path, toobig_false );
-      contended_phi_rawmem->init_req( fall_in_path, mem );
+      contended_region    ->init_req(fall_in_path, toobig_false);
+      contended_phi_rawmem->init_req(fall_in_path, mem);
       transform_later(contended_region);
       transform_later(contended_phi_rawmem);
     }
@@ -1192,78 +1192,101 @@
     // Load(-locked) the heap top.
     // See note above concerning the control input when using a TLAB
     Node *old_eden_top = UseTLAB
-      ? new (C, 3) LoadPNode     ( ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM )
-      : new (C, 3) LoadPLockedNode( contended_region, contended_phi_rawmem, eden_top_adr );
+      ? new (C, 3) LoadPNode      (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM)
+      : new (C, 3) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr);
 
     transform_later(old_eden_top);
     // Add to heap top to get a new heap top
-    Node *new_eden_top = new (C, 4) AddPNode( top(), old_eden_top, size_in_bytes );
+    Node *new_eden_top = new (C, 4) AddPNode(top(), old_eden_top, size_in_bytes);
     transform_later(new_eden_top);
     // Check for needing a GC; compare against heap end
-    Node *needgc_cmp = new (C, 3) CmpPNode( new_eden_top, eden_end );
+    Node *needgc_cmp = new (C, 3) CmpPNode(new_eden_top, eden_end);
     transform_later(needgc_cmp);
-    Node *needgc_bol = new (C, 2) BoolNode( needgc_cmp, BoolTest::ge );
+    Node *needgc_bol = new (C, 2) BoolNode(needgc_cmp, BoolTest::ge);
     transform_later(needgc_bol);
-    IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
+    IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
     transform_later(needgc_iff);
 
     // Plug the failing-heap-space-need-gc test into the slow-path region
-    Node *needgc_true = new (C, 1) IfTrueNode( needgc_iff );
+    Node *needgc_true = new (C, 1) IfTrueNode(needgc_iff);
     transform_later(needgc_true);
-    if( initial_slow_test ) {
-      slow_region    ->init_req( need_gc_path, needgc_true );
+    if (initial_slow_test) {
+      slow_region->init_req(need_gc_path, needgc_true);
       // This completes all paths into the slow merge point
       transform_later(slow_region);
     } else {                      // No initial slow path needed!
       // Just fall from the need-GC path straight into the VM call.
-      slow_region    = needgc_true;
+      slow_region = needgc_true;
     }
     // No need for a GC.  Setup for the Store-Conditional
-    Node *needgc_false = new (C, 1) IfFalseNode( needgc_iff );
+    Node *needgc_false = new (C, 1) IfFalseNode(needgc_iff);
     transform_later(needgc_false);
 
     // Grab regular I/O before optional prefetch may change it.
     // Slow-path does no I/O so just set it to the original I/O.
-    result_phi_i_o->init_req( slow_result_path, i_o );
+    result_phi_i_o->init_req(slow_result_path, i_o);
 
     i_o = prefetch_allocation(i_o, needgc_false, contended_phi_rawmem,
                               old_eden_top, new_eden_top, length);
 
+    // Name successful fast-path variables
+    Node* fast_oop = old_eden_top;
+    Node* fast_oop_ctrl;
+    Node* fast_oop_rawmem;
+
     // Store (-conditional) the modified eden top back down.
     // StorePConditional produces flags for a test PLUS a modified raw
     // memory state.
-    Node *store_eden_top;
-    Node *fast_oop_ctrl;
-    if( UseTLAB ) {
-      store_eden_top = new (C, 4) StorePNode( needgc_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, new_eden_top );
+    if (UseTLAB) {
+      Node* store_eden_top =
+        new (C, 4) StorePNode(needgc_false, contended_phi_rawmem, eden_top_adr,
+                              TypeRawPtr::BOTTOM, new_eden_top);
       transform_later(store_eden_top);
       fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
+      fast_oop_rawmem = store_eden_top;
     } else {
-      store_eden_top = new (C, 5) StorePConditionalNode( needgc_false, contended_phi_rawmem, eden_top_adr, new_eden_top, old_eden_top );
+      Node* store_eden_top =
+        new (C, 5) StorePConditionalNode(needgc_false, contended_phi_rawmem, eden_top_adr,
+                                         new_eden_top, fast_oop/*old_eden_top*/);
       transform_later(store_eden_top);
-      Node *contention_check = new (C, 2) BoolNode( store_eden_top, BoolTest::ne );
+      Node *contention_check = new (C, 2) BoolNode(store_eden_top, BoolTest::ne);
       transform_later(contention_check);
       store_eden_top = new (C, 1) SCMemProjNode(store_eden_top);
       transform_later(store_eden_top);
 
       // If not using TLABs, check to see if there was contention.
-      IfNode *contention_iff = new (C, 2) IfNode ( needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN );
+      IfNode *contention_iff = new (C, 2) IfNode(needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN);
       transform_later(contention_iff);
-      Node *contention_true = new (C, 1) IfTrueNode( contention_iff );
+      Node *contention_true = new (C, 1) IfTrueNode(contention_iff);
       transform_later(contention_true);
       // If contention, loopback and try again.
-      contended_region->init_req( contended_loopback_path, contention_true );
-      contended_phi_rawmem->init_req( contended_loopback_path, store_eden_top );
+      contended_region->init_req(contended_loopback_path, contention_true);
+      contended_phi_rawmem->init_req(contended_loopback_path, store_eden_top);
 
       // Fast-path succeeded with no contention!
-      Node *contention_false = new (C, 1) IfFalseNode( contention_iff );
+      Node *contention_false = new (C, 1) IfFalseNode(contention_iff);
       transform_later(contention_false);
       fast_oop_ctrl = contention_false;
+
+      // Bump total allocated bytes for this thread
+      Node* thread = new (C, 1) ThreadLocalNode();
+      transform_later(thread);
+      Node* alloc_bytes_adr = basic_plus_adr(top()/*not oop*/, thread,
+                                             in_bytes(JavaThread::allocated_bytes_offset()));
+      Node* alloc_bytes = make_load(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
+                                    0, TypeLong::LONG, T_LONG);
+#ifdef _LP64
+      Node* alloc_size = size_in_bytes;
+#else
+      Node* alloc_size = new (C, 2) ConvI2LNode(size_in_bytes);
+      transform_later(alloc_size);
+#endif
+      Node* new_alloc_bytes = new (C, 3) AddLNode(alloc_bytes, alloc_size);
+      transform_later(new_alloc_bytes);
+      fast_oop_rawmem = make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
+                                   0, new_alloc_bytes, T_LONG);
     }
 
-    // Rename successful fast-path variables to make meaning more obvious
-    Node* fast_oop        = old_eden_top;
-    Node* fast_oop_rawmem = store_eden_top;
     fast_oop_rawmem = initialize_object(alloc,
                                         fast_oop_ctrl, fast_oop_rawmem, fast_oop,
                                         klass_node, length, size_in_bytes);
@@ -1282,11 +1305,11 @@
 
       call->init_req(TypeFunc::Parms+0, thread);
       call->init_req(TypeFunc::Parms+1, fast_oop);
-      call->init_req( TypeFunc::Control, fast_oop_ctrl );
-      call->init_req( TypeFunc::I_O    , top() )        ;   // does no i/o
-      call->init_req( TypeFunc::Memory , fast_oop_rawmem );
-      call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
-      call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );
+      call->init_req(TypeFunc::Control, fast_oop_ctrl);
+      call->init_req(TypeFunc::I_O    , top()); // does no i/o
+      call->init_req(TypeFunc::Memory , fast_oop_rawmem);
+      call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
+      call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
       transform_later(call);
       fast_oop_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control);
       transform_later(fast_oop_ctrl);
@@ -1295,10 +1318,10 @@
     }
 
     // Plug in the successful fast-path into the result merge point
-    result_region    ->init_req( fast_result_path, fast_oop_ctrl );
-    result_phi_rawoop->init_req( fast_result_path, fast_oop );
-    result_phi_i_o   ->init_req( fast_result_path, i_o );
-    result_phi_rawmem->init_req( fast_result_path, fast_oop_rawmem );
+    result_region    ->init_req(fast_result_path, fast_oop_ctrl);
+    result_phi_rawoop->init_req(fast_result_path, fast_oop);
+    result_phi_i_o   ->init_req(fast_result_path, i_o);
+    result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
   } else {
     slow_region = ctrl;
   }
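
Besides the whitespace cleanup, this hunk threads a new fast_oop_rawmem through both branches and, on the non-TLAB path, emits IR that charges the allocation to the thread's allocated-bytes counter. What the ThreadLocal/load/AddL/make_store node sequence computes is roughly the following, written as plain C++; the names are illustrative stand-ins, not HotSpot declarations:

    #include <cstdint>
    #include <cstddef>

    struct JavaThreadShadow {
        int64_t allocated_bytes;  // cumulative bytes allocated by this thread
    };

    // Rough C++ equivalent of the emitted IR: load the counter, widen the
    // allocation size to 64 bits (ConvI2L on non-_LP64 builds), add, store.
    inline void bump_allocated_bytes(JavaThreadShadow* thread,
                                     size_t size_in_bytes) {
        int64_t alloc_size = static_cast<int64_t>(size_in_bytes);
        thread->allocated_bytes += alloc_size;  // AddL + store back
    }
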
--- a/src/share/vm/opto/type.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/opto/type.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2187,8 +2187,11 @@
   case TypePtr::NotNull:
     return this;
   case TypePtr::Null:
-  case TypePtr::Constant:
-    return make( _bits+offset );
+  case TypePtr::Constant: {
+    address bits = _bits+offset;
+    if ( bits == 0 ) return TypePtr::NULL_PTR;
+    return make( bits );
+  }
   default:  ShouldNotReachHere();
   }
   return NULL;                  // Lint noise
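
The new block guards against an offset that exactly cancels the constant bits: a "constant pointer" whose value is 0 must normalize to the canonical null type rather than masquerade as a non-null constant. A small sketch of the same normalization under toy types (everything here is illustrative):

    #include <cstdint>

    struct TypeConstPtr {
        intptr_t bits;  // constant pointer value; 0 is reserved for null
    };

    static const TypeConstPtr NULL_PTR{0};

    // Adding an offset to a constant pointer: if the sum is 0, return the
    // canonical null type instead of a zero-valued "constant" pointer.
    TypeConstPtr add_offset(const TypeConstPtr& t, intptr_t offset) {
        intptr_t bits = t.bits + offset;
        if (bits == 0) return NULL_PTR;
        return TypeConstPtr{bits};
    }
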
--- a/src/share/vm/prims/jvm.h	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvm.h	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1649,7 +1649,8 @@
      * the new bit is also added in the main/baseline.
      */
     unsigned int thread_park_blocker : 1;
-    unsigned int : 31;
+    unsigned int post_vm_init_hook_enabled : 1;
+    unsigned int : 30;
     unsigned int : 32;
     unsigned int : 32;
 } jdk_version_info;
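
Claiming the new flag out of the 31-bit pad leaves the struct size and the two trailing padding words untouched, which is what keeps the version-handshake layout stable. A hedged compile-time check of that invariant; the struct below models only the flag word and is a stand-in, and the sizeof assumption holds on mainstream ABIs:

    #include <cstdint>

    struct version_flags {
        unsigned int thread_park_blocker       : 1;
        unsigned int post_vm_init_hook_enabled : 1;   // newly claimed bit
        unsigned int                           : 30;  // pad shrinks to keep 32
    };

    // The bit-field word must stay exactly 32 bits so the struct layout
    // (and thus the JVM/JDK version handshake) is unchanged.
    static_assert(sizeof(version_flags) == sizeof(uint32_t),
                  "flag word must remain one 32-bit word");
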
--- a/src/share/vm/prims/jvmti.xml	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmti.xml	Fri Jan 21 13:03:13 2011 -0800
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="ISO-8859-1"?>
 <?xml-stylesheet type="text/xsl" href="jvmti.xsl"?>
 <!--
- Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -5649,6 +5649,45 @@
       </errors>
     </function>
 
+    <function id="GetLocalInstance" num="155" since="1.2">
+      <synopsis>Get Local Instance</synopsis>
+      <description>
+        This function can be used to retrieve the value of the local object
+        variable at slot 0 (the "<code>this</code>" object) from non-static
+        frames.  This function can retrieve the "<code>this</code>" object from
+        native method frames, whereas <code>GetLocalObject()</code> would 
+        return <code>JVMTI_ERROR_OPAQUE_FRAME</code> in those cases.
+      </description>
+      <origin>new</origin>
+      <capabilities>
+	<required id="can_access_local_variables"></required>
+      </capabilities>
+      <parameters>
+ 	<param id="thread">
+	  <jthread null="current" frame="frame"/>
+	  <description>
+	    The thread of the frame containing the variable's value.
+	  </description>
+	</param>
+	<param id="depth">
+	  <jframeID thread="thread"/>
+	  <description>
+	    The depth of the frame containing the variable's value.
+	  </description>
+	</param>
+	<param id="value_ptr">
+	  <outptr><jobject/></outptr>
+	    <description>
+	      On return, points to the variable's value. 
+	    </description>
+	</param>
+      </parameters>
+      <errors>
+	<error id="JVMTI_ERROR_INVALID_SLOT">
+	  If the specified frame is a static method frame.
+	</error>
+      </errors>
+    </function>
     <function id="GetLocalInt" num="22">
       <synopsis>Get Local Variable - Int</synopsis>
       <description>
@@ -10658,7 +10697,7 @@
 	    <internallink id="mUTF">modified UTF-8</internallink> string.
 	  </description>
 	</param>
-        <param id="value">
+        <param id="value_ptr">
 	  <inbuf>
 	    <char/>
 	    <nullok>
@@ -13009,8 +13048,8 @@
   <event label="Garbage Collection Start"
 	 id="GarbageCollectionStart" const="JVMTI_EVENT_GARBAGE_COLLECTION_START" num="81">
     <description>
-      A Garbage Collection Start event is sent when a full cycle
-      garbage collection begins.
+      A Garbage Collection Start event is sent when a 
+      garbage collection pause begins.
       Only stop-the-world collections are reported--that is, collections during
       which all threads cease to modify the state of the Java virtual machine.
       This means that some collectors will never generate these events.
@@ -13036,8 +13075,8 @@
   <event label="Garbage Collection Finish"
 	 id="GarbageCollectionFinish" const="JVMTI_EVENT_GARBAGE_COLLECTION_FINISH" num="82">
     <description>
-      A Garbage Collection Finish event is sent when a full 
-      garbage collection cycle ends.
+      A Garbage Collection Finish event is sent when a
+      garbage collection pause ends.
       This event is sent while the VM is still stopped, thus
       the event handler must not use JNI functions and
       must not use <jvmti/> functions except those which
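
For agent writers, the new GetLocalInstance slots in beside GetLocalObject; the difference is that the receiver of a non-static native method frame is also reachable. A usage sketch, assuming a jvmti.h that includes this JVMTI 1.2 function and that can_access_local_variables was requested at OnLoad (error handling trimmed):

    #include <jvmti.h>

    // Fetch the "this" object at the given frame depth of a thread.
    // Unlike GetLocalObject(thread, depth, 0, ...), this also works for
    // non-static native method frames.
    jobject get_receiver(jvmtiEnv* jvmti, jthread thread, jint depth) {
        jobject receiver = nullptr;
        jvmtiError err = jvmti->GetLocalInstance(thread, depth, &receiver);
        if (err == JVMTI_ERROR_INVALID_SLOT) {
            return nullptr;  // static frame: there is no receiver at slot 0
        }
        return (err == JVMTI_ERROR_NONE) ? receiver : nullptr;
    }
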
--- a/src/share/vm/prims/jvmtiEnv.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1796,6 +1796,29 @@
   }
 } /* end GetLocalObject */
 
+// Threads_lock NOT held, java_thread not protected by lock
+// java_thread - pre-checked
+// java_thread - unchecked
+// depth - pre-checked as non-negative
+// value - pre-checked for NULL
+jvmtiError
+JvmtiEnv::GetLocalInstance(JavaThread* java_thread, jint depth, jobject* value_ptr) {
+  JavaThread* current_thread = JavaThread::current();
+  // rm object is created to clean up the javaVFrame created in
+  // doit_prologue(), but after doit() is finished with it.
+  ResourceMark rm(current_thread);
+
+  VM_GetReceiver op(java_thread, current_thread, depth);
+  VMThread::execute(&op);
+  jvmtiError err = op.result();
+  if (err != JVMTI_ERROR_NONE) {
+    return err;
+  } else {
+    *value_ptr = op.value().l;
+    return JVMTI_ERROR_NONE;
+  }
+} /* end GetLocalInstance */
+
 
 // Threads_lock NOT held, java_thread not protected by lock
 // java_thread - pre-checked
@@ -3417,12 +3440,12 @@
 // property - pre-checked for NULL
 // value - NULL is a valid value, must be checked
 jvmtiError
-JvmtiEnv::SetSystemProperty(const char* property, const char* value) {
+JvmtiEnv::SetSystemProperty(const char* property, const char* value_ptr) {
   jvmtiError err = JVMTI_ERROR_NOT_AVAILABLE;
 
   for (SystemProperty* p = Arguments::system_properties(); p != NULL; p = p->next()) {
     if (strcmp(property, p->key()) == 0) {
-      if (p->set_value((char *)value)) {
+      if (p->set_value((char *)value_ptr)) {
         err =  JVMTI_ERROR_NONE;
       }
     }
--- a/src/share/vm/prims/jvmtiEventController.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmtiEventController.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -667,14 +667,13 @@
 JvmtiEventControllerPrivate::thread_ended(JavaThread *thread) {
   // Removes the JvmtiThreadState associated with the specified thread.
   // May be called after all environments have been disposed.
+  assert(JvmtiThreadState_lock->is_locked(), "sanity check");
 
   EC_TRACE(("JVMTI [%s] # thread ended", JvmtiTrace::safe_get_thread_name(thread)));
 
   JvmtiThreadState *state = thread->jvmti_thread_state();
-  if (state != NULL) {
-    MutexLocker mu(JvmtiThreadState_lock);
-    delete state;
-  }
+  assert(state != NULL, "else why are we here?");
+  delete state;
 }
 
 void JvmtiEventControllerPrivate::set_event_callbacks(JvmtiEnvBase *env,
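
thread_ended() now states its locking contract with asserts instead of taking JvmtiThreadState_lock itself; the matching caller-side change in jvmtiExport.cpp below acquires the lock once and filters the no-state case before calling in. The shape of that refactor, sketched with std::mutex as a stand-in (std::mutex cannot assert is_locked() the way HotSpot's Mutex can, so here the contract lives in the name and the comment):

    #include <cassert>
    #include <mutex>

    struct ThreadState { /* per-thread JVMTI state */ };

    std::mutex state_lock;  // stand-in for JvmtiThreadState_lock

    // Callee: locking is now the caller's job; only the state precondition
    // can still be asserted directly in this sketch.
    void thread_ended_locked(ThreadState* state) {
        assert(state != nullptr && "else why are we here?");
        delete state;
    }

    // Caller: take the lock once, filter the no-state case, then call.
    void cleanup_thread(ThreadState* state) {
        std::lock_guard<std::mutex> guard(state_lock);
        if (state != nullptr) {
            thread_ended_locked(state);
        }
    }
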
--- a/src/share/vm/prims/jvmtiExport.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmtiExport.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -2253,12 +2253,14 @@
 
 void JvmtiExport::cleanup_thread(JavaThread* thread) {
   assert(JavaThread::current() == thread, "thread is not current");
-
+  MutexLocker mu(JvmtiThreadState_lock);
 
-  // This has to happen after the thread state is removed, which is
-  // why it is not in post_thread_end_event like its complement
-  // Maybe both these functions should be rolled into the posts?
-  JvmtiEventController::thread_ended(thread);
+  if (thread->jvmti_thread_state() != NULL) {
+    // This has to happen after the thread state is removed, which is
+    // why it is not in post_thread_end_event like its complement
+    // Maybe both these functions should be rolled into the posts?
+    JvmtiEventController::thread_ended(thread);
+  }
 }
 
 void JvmtiExport::oops_do(OopClosure* f) {
@@ -2266,6 +2268,14 @@
   JvmtiVMObjectAllocEventCollector::oops_do_for_all_threads(f);
 }
 
+void JvmtiExport::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  JvmtiTagMap::weak_oops_do(is_alive, f);
+}
+
+void JvmtiExport::gc_epilogue() {
+  JvmtiCurrentBreakpoints::gc_epilogue();
+}
+
 // Onload raw monitor transition.
 void JvmtiExport::transition_pending_onload_raw_monitors() {
   JvmtiPendingMonitors::transition_raw_monitors();
@@ -2358,15 +2368,6 @@
 }
 #endif // SERVICES_KERNEL
 
-// CMS has completed referencing processing so may need to update
-// tag maps.
-void JvmtiExport::cms_ref_processing_epilogue() {
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiTagMap::cms_ref_processing_epilogue();
-  }
-}
-
-
 ////////////////////////////////////////////////////////////////////////////////////////////////
 
 // Setup current thread for event collection.
@@ -2536,36 +2537,20 @@
   }
 };
 
-JvmtiGCMarker::JvmtiGCMarker(bool full) : _full(full), _invocation_count(0) {
-  assert(Thread::current()->is_VM_thread(), "wrong thread");
-
+JvmtiGCMarker::JvmtiGCMarker() {
   // if there aren't any JVMTI environments then nothing to do
   if (!JvmtiEnv::environments_might_exist()) {
     return;
   }
 
-  if (ForceFullGCJVMTIEpilogues) {
-    // force 'Full GC' was done semantics for JVMTI GC epilogues
-    _full = true;
-  }
-
-  // GarbageCollectionStart event posted from VM thread - okay because
-  // JVMTI is clear that the "world is stopped" and callback shouldn't
-  // try to call into the VM.
   if (JvmtiExport::should_post_garbage_collection_start()) {
     JvmtiExport::post_garbage_collection_start();
   }
 
-  // if "full" is false it probably means this is a scavenge of the young
-  // generation. However it could turn out that a "full" GC is required
-  // so we record the number of collections so that it can be checked in
-  // the destructor.
-  if (!_full) {
-    _invocation_count = Universe::heap()->total_full_collections();
+  if (SafepointSynchronize::is_at_safepoint()) {
+    // Do clean up tasks that need to be done at a safepoint
+    JvmtiEnvBase::check_for_periodic_clean_up();
   }
-
-  // Do clean up tasks that need to be done at a safepoint
-  JvmtiEnvBase::check_for_periodic_clean_up();
 }
 
 JvmtiGCMarker::~JvmtiGCMarker() {
@@ -2578,21 +2563,5 @@
   if (JvmtiExport::should_post_garbage_collection_finish()) {
     JvmtiExport::post_garbage_collection_finish();
   }
-
-  // we might have initially started out doing a scavenge of the young
-  // generation but could have ended up doing a "full" GC - check the
-  // GC count to see.
-  if (!_full) {
-    _full = (_invocation_count != Universe::heap()->total_full_collections());
-  }
-
-  // Full collection probably means the perm generation has been GC'ed
-  // so we clear the breakpoint cache.
-  if (_full) {
-    JvmtiCurrentBreakpoints::gc_epilogue();
-  }
-
-  // Notify heap/object tagging support
-  JvmtiTagMap::gc_epilogue(_full);
 }
 #endif // JVMTI_KERNEL
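
With the full-versus-scavenge bookkeeping gone (tag maps are now kept current via weak_oops_do and the breakpoint cache via an explicit gc_epilogue() call), JvmtiGCMarker reduces to a plain RAII pair: post GarbageCollectionStart on construction and GarbageCollectionFinish on destruction, for every pause. The idiom in isolation, with the event hooks stubbed out as placeholders:

    #include <cstdio>

    // Stand-ins for JvmtiExport::should_post_* / post_* hooks.
    bool should_post_gc_events() { return true; }
    void post_gc_start()  { std::puts("GarbageCollectionStart"); }
    void post_gc_finish() { std::puts("GarbageCollectionFinish"); }

    // Stack-allocated marker: constructing it brackets a GC pause with the
    // start/finish events, no matter which collector or pause type runs.
    class GCMarker {
     public:
        GCMarker()  { if (should_post_gc_events()) post_gc_start(); }
        ~GCMarker() { if (should_post_gc_events()) post_gc_finish(); }
    };

    void do_collection_pause() {
        GCMarker marker;  // events posted on entry and exit, even on early return
        // ... collect ...
    }
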
--- a/src/share/vm/prims/jvmtiExport.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmtiExport.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -346,6 +346,8 @@
   static void cleanup_thread             (JavaThread* thread) KERNEL_RETURN;
 
   static void oops_do(OopClosure* f) KERNEL_RETURN;
+  static void weak_oops_do(BoolObjectClosure* b, OopClosure* f) KERNEL_RETURN;
+  static void gc_epilogue() KERNEL_RETURN;
 
   static void transition_pending_onload_raw_monitors() KERNEL_RETURN;
 
@@ -356,9 +358,6 @@
 
   // SetNativeMethodPrefix support
   static char** get_all_native_method_prefixes(int* count_ptr);
-
-  // call after CMS has completed referencing processing
-  static void cms_ref_processing_epilogue() KERNEL_RETURN;
 };
 
 // Support class used by JvmtiDynamicCodeEventCollector and others. It
@@ -492,55 +491,11 @@
 
 // Base class for reporting GC events to JVMTI.
 class JvmtiGCMarker : public StackObj {
- private:
-  bool _full;                           // marks a "full" GC
-  unsigned int _invocation_count;       // GC invocation count
- protected:
-  JvmtiGCMarker(bool full) KERNEL_RETURN;       // protected
-  ~JvmtiGCMarker() KERNEL_RETURN;               // protected
+ public:
+  JvmtiGCMarker() KERNEL_RETURN;
+  ~JvmtiGCMarker() KERNEL_RETURN;
 };
 
-
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a stop-the-world GC for failed allocation.
-//
-// Usage :-
-//
-// void VM_GenCollectForAllocation::doit() {
-//   JvmtiGCForAllocationMarker jgcm;
-//   :
-// }
-//
-// If jvmti is not enabled the constructor and destructor is essentially
-// a no-op (no overhead).
-//
-class JvmtiGCForAllocationMarker : public JvmtiGCMarker {
- public:
-  JvmtiGCForAllocationMarker() : JvmtiGCMarker(false) {
-  }
-};
-
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a "full" stop-the-world GC. This class differs
-// from JvmtiGCForAllocationMarker in that this class assumes that a
-// "full" GC will happen.
-//
-// Usage :-
-//
-// void VM_GenCollectFull::doit() {
-//   JvmtiGCFullMarker jgcm;
-//   :
-// }
-//
-class JvmtiGCFullMarker : public JvmtiGCMarker {
- public:
-  JvmtiGCFullMarker() : JvmtiGCMarker(true) {
-  }
-};
-
-
 // JvmtiHideSingleStepping is a helper class for hiding
 // internal single step events.
 class JvmtiHideSingleStepping : public StackObj {
--- a/src/share/vm/prims/jvmtiImpl.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmtiImpl.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -217,7 +217,6 @@
 
 void GrowableCache::gc_epilogue() {
   int len = _elements->length();
-  // recompute the new cache value after GC
   for (int i=0; i<len; i++) {
     _cache[i] = _elements->at(i)->getCacheValue();
   }
@@ -401,7 +400,7 @@
   _bps.oops_do(f);
 }
 
-void  JvmtiBreakpoints::gc_epilogue() {
+void JvmtiBreakpoints::gc_epilogue() {
   _bps.gc_epilogue();
 }
 
@@ -540,7 +539,6 @@
   }
 }
 
-
 ///////////////////////////////////////////////////////////////
 //
 // class VM_GetOrSetLocal
@@ -586,7 +584,6 @@
 {
 }
 
-
 vframe *VM_GetOrSetLocal::get_vframe() {
   if (!_thread->has_last_Java_frame()) {
     return NULL;
@@ -609,7 +606,7 @@
   }
   javaVFrame *jvf = (javaVFrame*)vf;
 
-  if (!vf->is_java_frame() || jvf->method()->is_native()) {
+  if (!vf->is_java_frame()) {
     _result = JVMTI_ERROR_OPAQUE_FRAME;
     return NULL;
   }
@@ -740,6 +737,15 @@
   _jvf = get_java_vframe();
   NULL_CHECK(_jvf, false);
 
+  if (_jvf->method()->is_native()) {
+    if (getting_receiver() && !_jvf->method()->is_static()) {
+      return true;
+    } else {
+      _result = JVMTI_ERROR_OPAQUE_FRAME;
+      return false;
+    }
+  }
+
   if (!check_slot_type(_jvf)) {
     return false;
   }
@@ -781,40 +787,46 @@
     HandleMark hm;
 
     switch (_type) {
-    case T_INT:    locals->set_int_at   (_index, _value.i); break;
-    case T_LONG:   locals->set_long_at  (_index, _value.j); break;
-    case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
-    case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
-    case T_OBJECT: {
-      Handle ob_h(JNIHandles::resolve_external_guard(_value.l));
-      locals->set_obj_at (_index, ob_h);
-      break;
-    }
-    default: ShouldNotReachHere();
+      case T_INT:    locals->set_int_at   (_index, _value.i); break;
+      case T_LONG:   locals->set_long_at  (_index, _value.j); break;
+      case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
+      case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
+      case T_OBJECT: {
+        Handle ob_h(JNIHandles::resolve_external_guard(_value.l));
+        locals->set_obj_at (_index, ob_h);
+        break;
+      }
+      default: ShouldNotReachHere();
     }
     _jvf->set_locals(locals);
   } else {
-    StackValueCollection *locals = _jvf->locals();
+    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
+      assert(getting_receiver(), "Can only get here when getting receiver");
+      oop receiver = _jvf->fr().get_native_receiver();
+      _value.l = JNIHandles::make_local(_calling_thread, receiver);
+    } else {
+      StackValueCollection *locals = _jvf->locals();
 
-    if (locals->at(_index)->type() == T_CONFLICT) {
-      memset(&_value, 0, sizeof(_value));
-      _value.l = NULL;
-      return;
-    }
+      if (locals->at(_index)->type() == T_CONFLICT) {
+        memset(&_value, 0, sizeof(_value));
+        _value.l = NULL;
+        return;
+      }
 
-    switch (_type) {
-    case T_INT:    _value.i = locals->int_at   (_index);   break;
-    case T_LONG:   _value.j = locals->long_at  (_index);   break;
-    case T_FLOAT:  _value.f = locals->float_at (_index);   break;
-    case T_DOUBLE: _value.d = locals->double_at(_index);   break;
-    case T_OBJECT: {
-      // Wrap the oop to be returned in a local JNI handle since
-      // oops_do() no longer applies after doit() is finished.
-      oop obj = locals->obj_at(_index)();
-      _value.l = JNIHandles::make_local(_calling_thread, obj);
-      break;
-    }
-    default: ShouldNotReachHere();
+      switch (_type) {
+        case T_INT:    _value.i = locals->int_at   (_index);   break;
+        case T_LONG:   _value.j = locals->long_at  (_index);   break;
+        case T_FLOAT:  _value.f = locals->float_at (_index);   break;
+        case T_DOUBLE: _value.d = locals->double_at(_index);   break;
+        case T_OBJECT: {
+          // Wrap the oop to be returned in a local JNI handle since
+          // oops_do() no longer applies after doit() is finished.
+          oop obj = locals->obj_at(_index)();
+          _value.l = JNIHandles::make_local(_calling_thread, obj);
+          break;
+        }
+        default: ShouldNotReachHere();
+      }
     }
   }
 }
@@ -825,6 +837,10 @@
 }
 
 
+VM_GetReceiver::VM_GetReceiver(
+    JavaThread* thread, JavaThread* caller_thread, jint depth)
+    : VM_GetOrSetLocal(thread, caller_thread, depth, 0) {}
+
 /////////////////////////////////////////////////////////////////////////////////////////
 
 //
--- a/src/share/vm/prims/jvmtiImpl.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmtiImpl.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -117,6 +117,7 @@
   void clear();
   // apply f to every element and update the cache
   void oops_do(OopClosure* f);
+  // update the cache after a full gc
   void gc_epilogue();
 };
 
@@ -278,13 +279,13 @@
 
   int length();
   void oops_do(OopClosure* f);
-  void gc_epilogue();
   void print();
 
   int  set(JvmtiBreakpoint& bp);
   int  clear(JvmtiBreakpoint& bp);
   void clearall_in_class_at_safepoint(klassOop klass);
   void clearall();
+  void gc_epilogue();
 };
 
 
@@ -355,7 +356,7 @@
 // to the thread simultaneously.
 //
 class VM_GetOrSetLocal : public VM_Operation {
-private:
+ protected:
   JavaThread* _thread;
   JavaThread* _calling_thread;
   jint        _depth;
@@ -365,6 +366,10 @@
   javaVFrame* _jvf;
   bool        _set;
 
+  // It is possible to get the receiver out of a non-static native wrapper
+  // frame.  Use VM_GetReceiver to do this.
+  virtual bool getting_receiver() const { return false; }
+
   jvmtiError  _result;
 
   vframe* get_vframe();
@@ -395,6 +400,15 @@
   static bool is_assignable(const char* ty_sign, Klass* klass, Thread* thread);
 };
 
+class VM_GetReceiver : public VM_GetOrSetLocal {
+ protected:
+  virtual bool getting_receiver() const { return true; }
+
+ public:
+  VM_GetReceiver(JavaThread* thread, JavaThread* calling_thread, jint depth);
+  const char* name() const                       { return "get receiver"; }
+};
+
 
 ///////////////////////////////////////////////////////////////
 //
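
VM_GetReceiver reuses the whole VM_GetOrSetLocal machinery and changes behavior through a single virtual predicate: getting_receiver() lets doit_prologue() accept a non-static native frame that would otherwise fail with JVMTI_ERROR_OPAQUE_FRAME. A minimal model of that template-method hook, with the frame checks simplified to booleans:

    #include <iostream>

    class GetOrSetLocal {
     protected:
        // Hook: subclasses that only need the receiver may relax the
        // native-frame check in prologue().
        virtual bool getting_receiver() const { return false; }

     public:
        virtual ~GetOrSetLocal() = default;

        bool prologue(bool frame_is_native, bool frame_is_static) {
            if (frame_is_native) {
                // The receiver of a non-static native frame is recoverable.
                return getting_receiver() && !frame_is_static;
            }
            return true;  // Java frames: proceed to the usual slot checks
        }
    };

    class GetReceiver : public GetOrSetLocal {
     protected:
        bool getting_receiver() const override { return true; }
    };

    int main() {
        GetOrSetLocal base;
        GetReceiver recv;
        std::cout << base.prologue(true, false)    // 0: opaque frame
                  << recv.prologue(true, false);   // 1: receiver reachable
    }
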
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1458,7 +1458,7 @@
     if (bc_length == 0) {
       // More complicated bytecodes report a length of zero so
       // we have to try again a slightly different way.
-      bc_length = Bytecodes::length_at(bcp);
+      bc_length = Bytecodes::length_at(method(), bcp);
     }
 
     assert(bc_length != 0, "impossible bytecode length");
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -50,7 +50,7 @@
 
 // JvmtiTagHashmapEntry
 //
-// Each entry encapsulates a JNI weak reference to the tagged object
+// Each entry encapsulates a reference to the tagged object
 // and the tag value. In addition an entry includes a next pointer which
 // is used to chain entries together.
 
@@ -58,24 +58,25 @@
  private:
   friend class JvmtiTagMap;
 
-  jweak _object;                        // JNI weak ref to tagged object
+  oop _object;                          // tagged object
   jlong _tag;                           // the tag
   JvmtiTagHashmapEntry* _next;          // next on the list
 
-  inline void init(jweak object, jlong tag) {
+  inline void init(oop object, jlong tag) {
     _object = object;
     _tag = tag;
     _next = NULL;
   }
 
   // constructor
-  JvmtiTagHashmapEntry(jweak object, jlong tag)         { init(object, tag); }
+  JvmtiTagHashmapEntry(oop object, jlong tag)         { init(object, tag); }
 
  public:
 
   // accessor methods
-  inline jweak object() const                           { return _object; }
-  inline jlong tag() const                              { return _tag; }
+  inline oop object() const                           { return _object; }
+  inline oop* object_addr()                           { return &_object; }
+  inline jlong tag() const                            { return _tag; }
 
   inline void set_tag(jlong tag) {
     assert(tag != 0, "can't be zero");
@@ -92,9 +93,7 @@
 // A hashmap is essentially a table of pointers to entries. Entries
 // are hashed to a location, or position in the table, and then
 // chained from that location. The "key" for hashing is address of
-// the object, or oop. The "value" is the JNI weak reference to the
-// object and the tag value. Keys are not stored with the entry.
-// Instead the weak reference is resolved to obtain the key.
+// the object, or oop. The "value" is the tag value.
 //
 // A hashmap maintains a count of the number of entries in the hashmap
 // and resizes if the number of entries exceeds a given threshold.
@@ -206,7 +205,7 @@
       JvmtiTagHashmapEntry* entry = _table[i];
       while (entry != NULL) {
         JvmtiTagHashmapEntry* next = entry->next();
-        oop key = JNIHandles::resolve(entry->object());
+        oop key = entry->object();
         assert(key != NULL, "jni weak reference cleared!!");
         unsigned int h = hash(key, new_size);
         JvmtiTagHashmapEntry* anchor = new_table[h];
@@ -299,14 +298,12 @@
     unsigned int h = hash(key);
     JvmtiTagHashmapEntry* entry = _table[h];
     while (entry != NULL) {
-      oop orig_key = JNIHandles::resolve(entry->object());
-      assert(orig_key != NULL, "jni weak reference cleared!!");
-      if (key == orig_key) {
-        break;
+      if (entry->object() == key) {
+         return entry;
       }
       entry = entry->next();
     }
-    return entry;
+    return NULL;
   }
 
 
@@ -343,9 +340,7 @@
     JvmtiTagHashmapEntry* entry = _table[h];
     JvmtiTagHashmapEntry* prev = NULL;
     while (entry != NULL) {
-      oop orig_key = JNIHandles::resolve(entry->object());
-      assert(orig_key != NULL, "jni weak reference cleared!!");
-      if (key == orig_key) {
+      if (key == entry->object()) {
         break;
       }
       prev = entry;
@@ -418,54 +413,6 @@
   }
 }
 
-// memory region for young generation
-MemRegion JvmtiTagMap::_young_gen;
-
-// get the memory region used for the young generation
-void JvmtiTagMap::get_young_generation() {
-  CollectedHeap* ch = Universe::heap();
-  switch (ch->kind()) {
-    case (CollectedHeap::GenCollectedHeap): {
-      _young_gen = ((GenCollectedHeap*)ch)->get_gen(0)->reserved();
-      break;
-    }
-#ifndef SERIALGC
-    case (CollectedHeap::ParallelScavengeHeap): {
-      _young_gen = ((ParallelScavengeHeap*)ch)->young_gen()->reserved();
-      break;
-    }
-    case (CollectedHeap::G1CollectedHeap): {
-      // Until a more satisfactory solution is implemented, all
-      // oops in the tag map will require rehash at each gc.
-      // This is a correct, if extremely inefficient solution.
-      // See RFE 6621729 for related commentary.
-      _young_gen = ch->reserved_region();
-      break;
-    }
-#endif  // !SERIALGC
-    default:
-      ShouldNotReachHere();
-  }
-}
-
-// returns true if oop is in the young generation
-inline bool JvmtiTagMap::is_in_young(oop o) {
-  assert(_young_gen.start() != NULL, "checking");
-  void* p = (void*)o;
-  bool in_young = _young_gen.contains(p);
-  return in_young;
-}
-
-// returns the appropriate hashmap for a given object
-inline JvmtiTagHashmap* JvmtiTagMap::hashmap_for(oop o) {
-  if (is_in_young(o)) {
-    return _hashmap[0];
-  } else {
-    return _hashmap[1];
-  }
-}
-
-
 // create a JvmtiTagMap
 JvmtiTagMap::JvmtiTagMap(JvmtiEnv* env) :
   _env(env),
@@ -476,13 +423,7 @@
   assert(JvmtiThreadState_lock->is_locked(), "sanity check");
   assert(((JvmtiEnvBase *)env)->tag_map() == NULL, "tag map already exists for environment");
 
-  // create the hashmaps
-  for (int i=0; i<n_hashmaps; i++) {
-    _hashmap[i] = new JvmtiTagHashmap();
-  }
-
-  // get the memory region used by the young generation
-  get_young_generation();
+  _hashmap = new JvmtiTagHashmap();
 
   // finally add us to the environment
   ((JvmtiEnvBase *)env)->set_tag_map(this);
@@ -496,25 +437,20 @@
   // also being destroryed.
   ((JvmtiEnvBase *)_env)->set_tag_map(NULL);
 
-  // iterate over the hashmaps and destroy each of the entries
-  for (int i=0; i<n_hashmaps; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-    JvmtiTagHashmapEntry** table = hashmap->table();
-    for (int j=0; j<hashmap->size(); j++) {
-      JvmtiTagHashmapEntry *entry = table[j];
-      while (entry != NULL) {
-        JvmtiTagHashmapEntry* next = entry->next();
-        jweak ref = entry->object();
-        JNIHandles::destroy_weak_global(ref);
-        delete entry;
-        entry = next;
-      }
+  JvmtiTagHashmapEntry** table = _hashmap->table();
+  for (int j = 0; j < _hashmap->size(); j++) {
+    JvmtiTagHashmapEntry* entry = table[j];
+    while (entry != NULL) {
+      JvmtiTagHashmapEntry* next = entry->next();
+      delete entry;
+      entry = next;
     }
-
-    // finally destroy the hashmap
-    delete hashmap;
   }
 
+  // finally destroy the hashmap
+  delete _hashmap;
+  _hashmap = NULL;
+
   // remove any entries on the free list
   JvmtiTagHashmapEntry* entry = _free_entries;
   while (entry != NULL) {
@@ -522,12 +458,13 @@
     delete entry;
     entry = next;
   }
+  _free_entries = NULL;
 }
 
 // create a hashmap entry
 // - if there's an entry on the (per-environment) free list then this
 // is returned. Otherwise a new entry is allocated.
-JvmtiTagHashmapEntry* JvmtiTagMap::create_entry(jweak ref, jlong tag) {
+JvmtiTagHashmapEntry* JvmtiTagMap::create_entry(oop ref, jlong tag) {
   assert(Thread::current()->is_VM_thread() || is_locked(), "checking");
   JvmtiTagHashmapEntry* entry;
   if (_free_entries == NULL) {
@@ -558,10 +495,10 @@
 // returns the tag map for the given environments. If the tag map
 // doesn't exist then it is created.
 JvmtiTagMap* JvmtiTagMap::tag_map_for(JvmtiEnv* env) {
-  JvmtiTagMap* tag_map = ((JvmtiEnvBase *)env)->tag_map();
+  JvmtiTagMap* tag_map = ((JvmtiEnvBase*)env)->tag_map();
   if (tag_map == NULL) {
     MutexLocker mu(JvmtiThreadState_lock);
-    tag_map = ((JvmtiEnvBase *)env)->tag_map();
+    tag_map = ((JvmtiEnvBase*)env)->tag_map();
     if (tag_map == NULL) {
       tag_map = new JvmtiTagMap(env);
     }
@@ -573,17 +510,13 @@
 
 // iterate over all entries in the tag map.
 void JvmtiTagMap::entry_iterate(JvmtiTagHashmapEntryClosure* closure) {
-  for (int i=0; i<n_hashmaps; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-    hashmap->entry_iterate(closure);
-  }
+  hashmap()->entry_iterate(closure);
 }
 
 // returns true if the hashmaps are empty
 bool JvmtiTagMap::is_empty() {
   assert(SafepointSynchronize::is_at_safepoint() || is_locked(), "checking");
-  assert(n_hashmaps == 2, "not implemented");
-  return ((_hashmap[0]->entry_count() == 0) && (_hashmap[1]->entry_count() == 0));
+  return hashmap()->entry_count() == 0;
 }
 
 
@@ -591,7 +524,7 @@
 // not tagged
 //
 static inline jlong tag_for(JvmtiTagMap* tag_map, oop o) {
-  JvmtiTagHashmapEntry* entry = tag_map->hashmap_for(o)->find(o);
+  JvmtiTagHashmapEntry* entry = tag_map->hashmap()->find(o);
   if (entry == NULL) {
     return 0;
   } else {
@@ -655,7 +588,7 @@
 
     // record the context
     _tag_map = tag_map;
-    _hashmap = tag_map->hashmap_for(_o);
+    _hashmap = tag_map->hashmap();
     _entry = _hashmap->find(_o);
 
     // get object tag
@@ -694,23 +627,18 @@
     if (obj_tag != 0) {
       // callback has tagged the object
       assert(Thread::current()->is_VM_thread(), "must be VMThread");
-      HandleMark hm;
-      Handle h(o);
-      jweak ref = JNIHandles::make_weak_global(h);
-      entry = tag_map()->create_entry(ref, obj_tag);
+      entry = tag_map()->create_entry(o, obj_tag);
       hashmap->add(o, entry);
     }
   } else {
     // object was previously tagged - the callback may have untagged
     // the object or changed the tag value
     if (obj_tag == 0) {
-      jweak ref = entry->object();
 
       JvmtiTagHashmapEntry* entry_removed = hashmap->remove(o);
       assert(entry_removed == entry, "checking");
       tag_map()->destroy_entry(entry);
 
-      JNIHandles::destroy_weak_global(ref);
     } else {
       if (obj_tag != entry->tag()) {
          entry->set_tag(obj_tag);
@@ -760,7 +688,7 @@
       // for Classes the klassOop is tagged
       _referrer = klassOop_if_java_lang_Class(referrer);
       // record the context
-      _referrer_hashmap = tag_map->hashmap_for(_referrer);
+      _referrer_hashmap = tag_map->hashmap();
       _referrer_entry = _referrer_hashmap->find(_referrer);
 
       // get object tag
@@ -796,8 +724,7 @@
 //
 // This function is performance critical. If many threads attempt to tag objects
 // around the same time then it's possible that the Mutex associated with the
-// tag map will be a hot lock. Eliminating this lock will not eliminate the issue
-// because creating a JNI weak reference requires acquiring a global lock also.
+// tag map will be a hot lock.
 void JvmtiTagMap::set_tag(jobject object, jlong tag) {
   MutexLocker ml(lock());
 
@@ -808,22 +735,14 @@
   o = klassOop_if_java_lang_Class(o);
 
   // see if the object is already tagged
-  JvmtiTagHashmap* hashmap = hashmap_for(o);
+  JvmtiTagHashmap* hashmap = _hashmap;
   JvmtiTagHashmapEntry* entry = hashmap->find(o);
 
   // if the object is not already tagged then we tag it
   if (entry == NULL) {
     if (tag != 0) {
-      HandleMark hm;
-      Handle h(o);
-      jweak ref = JNIHandles::make_weak_global(h);
-
-      // the object may have moved because make_weak_global may
-      // have blocked - thus it is necessary resolve the handle
-      // and re-hash the object.
-      o = h();
-      entry = create_entry(ref, tag);
-      hashmap_for(o)->add(o, entry);
+      entry = create_entry(o, tag);
+      hashmap->add(o, entry);
     } else {
       // no-op
     }
@@ -831,13 +750,9 @@
     // if the object is already tagged then we either update
     // the tag (if a new tag value has been provided)
     // or remove the object if the new tag value is 0.
-    // Removing the object requires that we also delete the JNI
-    // weak ref to the object.
     if (tag == 0) {
-      jweak ref = entry->object();
       hashmap->remove(o);
       destroy_entry(entry);
-      JNIHandles::destroy_weak_global(ref);
     } else {
       entry->set_tag(tag);
     }
@@ -1626,8 +1541,8 @@
   void do_entry(JvmtiTagHashmapEntry* entry) {
     for (int i=0; i<_tag_count; i++) {
       if (_tags[i] == entry->tag()) {
-        oop o = JNIHandles::resolve(entry->object());
-        assert(o != NULL && o != JNIHandles::deleted_handle(), "sanity check");
+        oop o = entry->object();
+        assert(o != NULL, "sanity check");
 
         // the mirror is tagged
         if (o->is_klass()) {
@@ -3374,62 +3289,25 @@
 }
 
 
-// called post-GC
-// - for each JVMTI environment with an object tag map, call its rehash
-// function to re-sync with the new object locations.
-void JvmtiTagMap::gc_epilogue(bool full) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+void JvmtiTagMap::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  // No locks during VM bring-up (0 threads) and no safepoints after main
+  // thread creation and before VMThread creation (1 thread); initial GC
+  // verification can happen in that window and can reach this code.
+  assert(Threads::number_of_threads() <= 1 ||
+         SafepointSynchronize::is_at_safepoint(),
+         "must be executed at a safepoint");
   if (JvmtiEnv::environments_might_exist()) {
-    // re-obtain the memory region for the young generation (might
-    // changed due to adaptive resizing policy)
-    get_young_generation();
-
     JvmtiEnvIterator it;
     for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
       JvmtiTagMap* tag_map = env->tag_map();
       if (tag_map != NULL && !tag_map->is_empty()) {
-        TraceTime t(full ? "JVMTI Full Rehash " : "JVMTI Rehash ", TraceJVMTIObjectTagging);
-        if (full) {
-          tag_map->rehash(0, n_hashmaps);
-        } else {
-          tag_map->rehash(0, 0);        // tag map for young gen only
-        }
+        tag_map->do_weak_oops(is_alive, f);
       }
     }
   }
 }
 
-// CMS has completed referencing processing so we may have JNI weak refs
-// to objects in the CMS generation that have been GC'ed.
-void JvmtiTagMap::cms_ref_processing_epilogue() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-  assert(UseConcMarkSweepGC, "should only be used with CMS");
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiEnvIterator it;
-    for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
-      JvmtiTagMap* tag_map = ((JvmtiEnvBase *)env)->tag_map();
-      if (tag_map != NULL && !tag_map->is_empty()) {
-        TraceTime t("JVMTI Rehash (CMS) ", TraceJVMTIObjectTagging);
-        tag_map->rehash(1, n_hashmaps);    // assume CMS not used in young gen
-      }
-    }
-  }
-}
-
-
-// For each entry in the hashmaps 'start' to 'end' :
-//
-// 1. resolve the JNI weak reference
-//
-// 2. If it resolves to NULL it means the object has been freed so the entry
-//    is removed, the weak reference destroyed, and the object free event is
-//    posted (if enabled).
-//
-// 3. If the weak reference resolves to an object then we re-hash the object
-//    to see if it has moved or has been promoted (from the young to the old
-//    generation for example).
-//
-void JvmtiTagMap::rehash(int start, int end) {
+void JvmtiTagMap::do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f) {
 
   // does this environment have the OBJECT_FREE event enabled
   bool post_object_free = env()->is_enabled(JVMTI_EVENT_OBJECT_FREE);
@@ -3437,143 +3315,98 @@
   // counters used for trace message
   int freed = 0;
   int moved = 0;
-  int promoted = 0;
-
-  // we assume there are two hashmaps - one for the young generation
-  // and the other for all other spaces.
-  assert(n_hashmaps == 2, "not implemented");
-  JvmtiTagHashmap* young_hashmap = _hashmap[0];
-  JvmtiTagHashmap* other_hashmap = _hashmap[1];
+
+  JvmtiTagHashmap* hashmap = this->hashmap();
 
   // reenable sizing (if disabled)
-  young_hashmap->set_resizing_enabled(true);
-  other_hashmap->set_resizing_enabled(true);
-
-  // when re-hashing the hashmap corresponding to the young generation we
-  // collect the entries corresponding to objects that have been promoted.
-  JvmtiTagHashmapEntry* promoted_entries = NULL;
-
-  if (end >= n_hashmaps) {
-    end = n_hashmaps - 1;
+  hashmap->set_resizing_enabled(true);
+
+  // if the hashmap is empty then we can skip it
+  if (hashmap->_entry_count == 0) {
+    return;
   }
 
-  for (int i=start; i <= end; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-
-    // if the hashmap is empty then we can skip it
-    if (hashmap->_entry_count == 0) {
-      continue;
-    }
-
-    // now iterate through each entry in the table
-
-    JvmtiTagHashmapEntry** table = hashmap->table();
-    int size = hashmap->size();
-
-    for (int pos=0; pos<size; pos++) {
-      JvmtiTagHashmapEntry* entry = table[pos];
-      JvmtiTagHashmapEntry* prev = NULL;
-
-      while (entry != NULL) {
-        JvmtiTagHashmapEntry* next = entry->next();
-
-        jweak ref = entry->object();
-        oop oop = JNIHandles::resolve(ref);
-
-        // has object been GC'ed
-        if (oop == NULL) {
-          // grab the tag
-          jlong tag = entry->tag();
-          guarantee(tag != 0, "checking");
-
-          // remove GC'ed entry from hashmap and return the
-          // entry to the free list
-          hashmap->remove(prev, pos, entry);
-          destroy_entry(entry);
-
-          // destroy the weak ref
-          JNIHandles::destroy_weak_global(ref);
-
-          // post the event to the profiler
-          if (post_object_free) {
-            JvmtiExport::post_object_free(env(), tag);
+  // now iterate through each entry in the table
+
+  JvmtiTagHashmapEntry** table = hashmap->table();
+  int size = hashmap->size();
+
+  JvmtiTagHashmapEntry* delayed_add = NULL;
+
+  for (int pos = 0; pos < size; ++pos) {
+    JvmtiTagHashmapEntry* entry = table[pos];
+    JvmtiTagHashmapEntry* prev = NULL;
+
+    while (entry != NULL) {
+      JvmtiTagHashmapEntry* next = entry->next();
+
+      oop* obj = entry->object_addr();
+
+      // has object been GC'ed
+      if (!is_alive->do_object_b(entry->object())) {
+        // grab the tag
+        jlong tag = entry->tag();
+        guarantee(tag != 0, "checking");
+
+        // remove GC'ed entry from hashmap and return the
+        // entry to the free list
+        hashmap->remove(prev, pos, entry);
+        destroy_entry(entry);
+
+        // post the event to the profiler
+        if (post_object_free) {
+          JvmtiExport::post_object_free(env(), tag);
+        }
+
+        ++freed;
+      } else {
+        f->do_oop(entry->object_addr());
+        oop new_oop = entry->object();
+
+        // if the object has moved then re-hash it and move its
+        // entry to its new location.
+        unsigned int new_pos = JvmtiTagHashmap::hash(new_oop, size);
+        if (new_pos != (unsigned int)pos) {
+          if (prev == NULL) {
+            table[pos] = next;
+          } else {
+            prev->set_next(next);
           }
-
-          freed++;
-          entry = next;
-          continue;
-        }
-
-        // if this is the young hashmap then the object is either promoted
-        // or moved.
-        // if this is the other hashmap then the object is moved.
-
-        bool same_gen;
-        if (i == 0) {
-          assert(hashmap == young_hashmap, "checking");
-          same_gen = is_in_young(oop);
-        } else {
-          same_gen = true;
-        }
-
-
-        if (same_gen) {
-          // if the object has moved then re-hash it and move its
-          // entry to its new location.
-          unsigned int new_pos = JvmtiTagHashmap::hash(oop, size);
-          if (new_pos != (unsigned int)pos) {
-            if (prev == NULL) {
-              table[pos] = next;
-            } else {
-              prev->set_next(next);
-            }
+          if (new_pos < (unsigned int)pos) {
             entry->set_next(table[new_pos]);
             table[new_pos] = entry;
-            moved++;
           } else {
-            // object didn't move
-            prev = entry;
+            // Delay adding this entry to its new position as we'd end up
+            // hitting it again during this iteration.
+            entry->set_next(delayed_add);
+            delayed_add = entry;
           }
+          moved++;
         } else {
-          // object has been promoted so remove the entry from the
-          // young hashmap
-          assert(hashmap == young_hashmap, "checking");
-          hashmap->remove(prev, pos, entry);
-
-          // move the entry to the promoted list
-          entry->set_next(promoted_entries);
-          promoted_entries = entry;
+          // object didn't move
+          prev = entry;
         }
-
-        entry = next;
       }
+
+      entry = next;
     }
   }
 
-
-  // add the entries, corresponding to the promoted objects, to the
-  // other hashmap.
-  JvmtiTagHashmapEntry* entry = promoted_entries;
-  while (entry != NULL) {
-    oop o = JNIHandles::resolve(entry->object());
-    assert(hashmap_for(o) == other_hashmap, "checking");
-    JvmtiTagHashmapEntry* next = entry->next();
-    other_hashmap->add(o, entry);
-    entry = next;
-    promoted++;
+  // Re-add all the entries which were kept aside
+  while (delayed_add != NULL) {
+    JvmtiTagHashmapEntry* next = delayed_add->next();
+    unsigned int pos = JvmtiTagHashmap::hash(delayed_add->object(), size);
+    delayed_add->set_next(table[pos]);
+    table[pos] = delayed_add;
+    delayed_add = next;
   }
 
   // stats
   if (TraceJVMTIObjectTagging) {
-    int total_moves = promoted + moved;
-
-    int post_total = 0;
-    for (int i=0; i<n_hashmaps; i++) {
-      post_total += _hashmap[i]->_entry_count;
-    }
+    int post_total = hashmap->_entry_count;
     int pre_total = post_total + freed;
 
-    tty->print("(%d->%d, %d freed, %d promoted, %d total moves)",
-        pre_total, post_total, freed, promoted, total_moves);
+    tty->print_cr("(%d->%d, %d freed, %d total moves)",
+        pre_total, post_total, freed, moved);
   }
 }
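
The rewritten sweep visits every bucket once: dead entries (per the is_alive closure) are freed and reported, live ones are forwarded and re-hashed in place, and an entry whose new bucket lies ahead of the cursor is parked on delayed_add so the pass never processes it twice. A compact standalone model of that delayed re-add trick, with a key-forwarding function standing in for the oop closure; the dead-entry path is omitted to keep the sketch short:

    struct Entry {
        long key;
        Entry* next;
    };

    const int SIZE = 8;

    unsigned bucket_of(long key) { return (unsigned)(key % SIZE); }

    // Re-hash every entry in a single pass. Entries whose new bucket is
    // behind the cursor can be moved immediately (that bucket was already
    // swept); entries that would land ahead are parked on `delayed`.
    void rehash_in_place(Entry* table[SIZE], long (*forward)(long)) {
        Entry* delayed = nullptr;
        for (int pos = 0; pos < SIZE; ++pos) {
            Entry* prev = nullptr;
            for (Entry* e = table[pos]; e != nullptr; ) {
                Entry* next = e->next;
                e->key = forward(e->key);  // analogue of f->do_oop(...)
                unsigned np = bucket_of(e->key);
                if (np != (unsigned)pos) {
                    if (prev == nullptr) table[pos] = next;  // unlink
                    else prev->next = next;
                    if (np < (unsigned)pos) {
                        e->next = table[np]; table[np] = e;  // safe: swept
                    } else {
                        e->next = delayed; delayed = e;      // park for later
                    }
                } else {
                    prev = e;  // entry didn't move
                }
                e = next;
            }
        }
        while (delayed != nullptr) {  // re-add the parked entries
            Entry* next = delayed->next;
            unsigned np = bucket_of(delayed->key);
            delayed->next = table[np]; table[np] = delayed;
            delayed = next;
        }
    }
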
--- a/src/share/vm/prims/jvmtiTagMap.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/jvmtiTagMap.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -45,17 +45,12 @@
  private:
 
   enum{
-    n_hashmaps = 2,                                 // encapsulates 2 hashmaps
-    max_free_entries = 4096                         // maximum number of free entries per env
+    max_free_entries = 4096         // maximum number of free entries per env
   };
 
-  // memory region for young generation
-  static MemRegion _young_gen;
-  static void get_young_generation();
-
   JvmtiEnv*             _env;                       // the jvmti environment
   Mutex                 _lock;                      // lock for this tag map
-  JvmtiTagHashmap*      _hashmap[n_hashmaps];       // the hashmaps
+  JvmtiTagHashmap*      _hashmap;                   // the hashmap
 
   JvmtiTagHashmapEntry* _free_entries;              // free list for this environment
   int _free_entries_count;                          // number of entries on the free list
@@ -67,11 +62,7 @@
   inline Mutex* lock()                      { return &_lock; }
   inline JvmtiEnv* env() const              { return _env; }
 
-  // rehash tags maps for generation start to end
-  void rehash(int start, int end);
-
-  // indicates if the object is in the young generation
-  static bool is_in_young(oop o);
+  void do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);
 
   // iterate over all entries in this tag map
   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
@@ -81,11 +72,10 @@
   // indicates if this tag map is locked
   bool is_locked()                          { return lock()->is_locked(); }
 
-  // return the appropriate hashmap for a given object
-  JvmtiTagHashmap* hashmap_for(oop o);
+  JvmtiTagHashmap* hashmap() { return _hashmap; }
 
   // create/destroy entries
-  JvmtiTagHashmapEntry* create_entry(jweak ref, jlong tag);
+  JvmtiTagHashmapEntry* create_entry(oop ref, jlong tag);
   void destroy_entry(JvmtiTagHashmapEntry* entry);
 
   // returns true if the hashmaps are empty
@@ -134,11 +124,8 @@
                                    jint* count_ptr, jobject** object_result_ptr,
                                    jlong** tag_result_ptr);
 
-  // call post-GC to rehash the tag maps.
-  static void gc_epilogue(bool full);
-
-  // call after referencing processing has completed (CMS)
-  static void cms_ref_processing_epilogue();
+  static void weak_oops_do(
+      BoolObjectClosure* is_alive, OopClosure* f) KERNEL_RETURN;
 };
 
 #endif // SHARE_VM_PRIMS_JVMTITAGMAP_HPP
--- a/src/share/vm/prims/methodComparator.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/methodComparator.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -194,10 +194,10 @@
 
   case Bytecodes::_ldc   : // fall through
   case Bytecodes::_ldc_w : {
-    Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method(), _s_old->bci());
-    Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method(), _s_new->bci());
-    int cpi_old = ldc_old->pool_index();
-    int cpi_new = ldc_new->pool_index();
+    Bytecode_loadconstant ldc_old(_s_old->method(), _s_old->bci());
+    Bytecode_loadconstant ldc_new(_s_new->method(), _s_new->bci());
+    int cpi_old = ldc_old.pool_index();
+    int cpi_new = ldc_new.pool_index();
     if (!pool_constants_same(cpi_old, cpi_new))
       return false;
     break;
@@ -267,8 +267,8 @@
   case Bytecodes::_ifnonnull : // fall through
   case Bytecodes::_ifnull    : // fall through
   case Bytecodes::_jsr       : {
-    int old_ofs = _s_old->bytecode()->get_offset_s2(c_old);
-    int new_ofs = _s_new->bytecode()->get_offset_s2(c_new);
+    int old_ofs = _s_old->bytecode().get_offset_s2(c_old);
+    int new_ofs = _s_new->bytecode().get_offset_s2(c_new);
     if (_switchable_test) {
       int old_dest = _s_old->bci() + old_ofs;
       int new_dest = _s_new->bci() + new_ofs;
@@ -304,8 +304,8 @@
 
   case Bytecodes::_goto_w : // fall through
   case Bytecodes::_jsr_w  : {
-    int old_ofs = _s_old->bytecode()->get_offset_s4(c_old);
-    int new_ofs = _s_new->bytecode()->get_offset_s4(c_new);
+    int old_ofs = _s_old->bytecode().get_offset_s4(c_old);
+    int new_ofs = _s_new->bytecode().get_offset_s4(c_new);
     if (_switchable_test) {
       int old_dest = _s_old->bci() + old_ofs;
       int new_dest = _s_new->bci() + new_ofs;
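The pattern in this hunk (repeated in the deoptimization.cpp, frame.cpp and sharedRuntime.cpp hunks below) is a single refactor: the Bytecode_* accessors become stack-constructed value objects instead of pointer-returning factories. In miniature:

    // Old idiom: resource-allocated object behind a pointer.
    //   Bytecode_loadconstant* ldc = Bytecode_loadconstant_at(method, bci);
    //   int cpi = ldc->pool_index();
    // New idiom: a plain value on the caller's stack.
    Bytecode_loadconstant ldc(method, bci);
    int cpi = ldc.pool_index();   // '.' instead of '->', no allocation

For the checked variants, a NULL return turns into an is_valid() query on the value, as the deoptimization.cpp and frame.cpp hunks show.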
--- a/src/share/vm/prims/methodHandleWalk.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,7 +137,6 @@
 
 
 void MethodHandleChain::lose(const char* msg, TRAPS) {
-  assert(false, "lose");
   _lose_message = msg;
   if (!THREAD->is_Java_thread() || ((JavaThread*)THREAD)->thread_state() != _thread_in_vm) {
     // throw a preallocated exception
--- a/src/share/vm/prims/methodHandles.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/methodHandles.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,7 +111,7 @@
 //------------------------------------------------------------------------------
 // MethodHandles::generate_adapters
 //
-void MethodHandles::generate_adapters(TRAPS) {
+void MethodHandles::generate_adapters() {
   if (!EnableMethodHandles || SystemDictionary::MethodHandle_klass() == NULL)  return;
 
   assert(_adapter_code == NULL, "generate only once");
@@ -123,20 +123,20 @@
     vm_exit_out_of_memory(_adapter_code_size, "CodeCache: no room for MethodHandles adapters");
   CodeBuffer code(_adapter_code);
   MethodHandlesAdapterGenerator g(&code);
-  g.generate(CHECK);
+  g.generate();
 }
 
 
 //------------------------------------------------------------------------------
 // MethodHandlesAdapterGenerator::generate
 //
-void MethodHandlesAdapterGenerator::generate(TRAPS) {
+void MethodHandlesAdapterGenerator::generate() {
   // Generate generic method handle adapters.
   for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
        ek < MethodHandles::_EK_LIMIT;
        ek = MethodHandles::EntryKind(1 + (int)ek)) {
     StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
-    MethodHandles::generate_method_handle_stub(_masm, ek, CHECK);
+    MethodHandles::generate_method_handle_stub(_masm, ek);
   }
 }
 
@@ -2621,10 +2621,20 @@
         warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
         enable_MH = false;
       }
+    } else {
+      enable_MH = false;
     }
   }
 
   if (enable_MH) {
+    // We need to link the MethodHandleImpl klass before we generate
+    // the method handle adapters as the _raise_exception adapter uses
+    // one of its methods (and its c2i-adapter).
+    KlassHandle    k  = SystemDictionaryHandles::MethodHandleImpl_klass();
+    instanceKlass* ik = instanceKlass::cast(k());
+    ik->link_class(CHECK);
+
+    MethodHandles::generate_adapters();
     MethodHandles::set_enabled(true);
   }
 
@@ -2645,10 +2655,5 @@
       MethodHandles::set_enabled(true);
     }
   }
-
-  // Generate method handles adapters if enabled.
-  if (MethodHandles::enabled()) {
-    MethodHandles::generate_adapters(CHECK);
-  }
 }
 JVM_END
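The net effect of this pair of hunks: adapter generation no longer runs lazily from a path that can throw (hence the dropped TRAPS), but eagerly on the enable path, after MethodHandleImpl has been linked. A sketch of the resulting order, error handling elided:

    // 1. Link the Java-side support class first; the _raise_exception
    //    adapter uses one of its methods (and its c2i adapter).
    KlassHandle k = SystemDictionaryHandles::MethodHandleImpl_klass();
    instanceKlass::cast(k())->link_class(CHECK);
    // 2. Only then generate the method handle adapter stubs ...
    MethodHandles::generate_adapters();
    // 3. ... and flip the switch.
    MethodHandles::set_enabled(true);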
--- a/src/share/vm/prims/methodHandles.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/methodHandles.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -294,11 +294,11 @@
   enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };
 
   // Generate MethodHandles adapters.
-  static void generate_adapters(TRAPS);
+  static void generate_adapters();
 
   // Called from InterpreterGenerator and MethodHandlesAdapterGenerator.
   static address generate_method_handle_interpreter_entry(MacroAssembler* _masm);
-  static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek, TRAPS);
+  static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek);
 
   // argument list parsing
   static int argument_slot(oop method_type, int arg);
@@ -530,7 +530,7 @@
 public:
   MethodHandlesAdapterGenerator(CodeBuffer* code) : StubCodeGenerator(code) {}
 
-  void generate(TRAPS);
+  void generate();
 };
 
 #endif // SHARE_VM_PRIMS_METHODHANDLES_HPP
--- a/src/share/vm/prims/unsafe.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/prims/unsafe.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -154,12 +154,11 @@
 
 #define GET_FIELD_VOLATILE(obj, offset, type_name, v) \
   oop p = JNIHandles::resolve(obj); \
-  volatile type_name v = *(volatile type_name*)index_oop_from_field_offset_long(p, offset)
+  volatile type_name v = OrderAccess::load_acquire((volatile type_name*)index_oop_from_field_offset_long(p, offset));
 
 #define SET_FIELD_VOLATILE(obj, offset, type_name, x) \
   oop p = JNIHandles::resolve(obj); \
-  *(volatile type_name*)index_oop_from_field_offset_long(p, offset) = x; \
-  OrderAccess::fence();
+  OrderAccess::release_store_fence((volatile type_name*)index_oop_from_field_offset_long(p, offset), x);
 
 // Macros for oops that check UseCompressedOops
 
@@ -181,7 +180,8 @@
     v = oopDesc::decode_heap_oop(n);                               \
   } else {                            \
     v = *(volatile oop*)index_oop_from_field_offset_long(p, offset);       \
-  }
+  } \
+  OrderAccess::acquire();
 
 
 // Get/SetObject must be special-cased, since it works with handles.
@@ -248,14 +248,22 @@
   UnsafeWrapper("Unsafe_SetObjectVolatile");
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
+  void* addr = index_oop_from_field_offset_long(p, offset);
+  OrderAccess::release();
   if (UseCompressedOops) {
-    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+    oop_store((narrowOop*)addr, x);
   } else {
-    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+    oop_store((oop*)addr, x);
   }
   OrderAccess::fence();
 UNSAFE_END
 
+#if defined(SPARC) || defined(X86)
+// Sparc and X86 have atomic jlong (8 bytes) instructions
+
+#else
+// Keep old code for platforms which may not have atomic jlong (8 bytes) instructions
+
 // Volatile long versions must use locks if !VM_Version::supports_cx8().
 // support_cx8 is a surrogate for 'supports atomic long memory ops'.
 
@@ -291,6 +299,7 @@
   }
 UNSAFE_END
 
+#endif // not SPARC and not X86
 
 #define DEFINE_GETSETOOP(jboolean, Boolean) \
  \
@@ -320,6 +329,16 @@
  \
 // END DEFINE_GETSETOOP.
 
+DEFINE_GETSETOOP(jboolean, Boolean)
+DEFINE_GETSETOOP(jbyte, Byte)
+DEFINE_GETSETOOP(jshort, Short);
+DEFINE_GETSETOOP(jchar, Char);
+DEFINE_GETSETOOP(jint, Int);
+DEFINE_GETSETOOP(jlong, Long);
+DEFINE_GETSETOOP(jfloat, Float);
+DEFINE_GETSETOOP(jdouble, Double);
+
+#undef DEFINE_GETSETOOP
 
 #define DEFINE_GETSETOOP_VOLATILE(jboolean, Boolean) \
  \
@@ -336,47 +355,49 @@
  \
 // END DEFINE_GETSETOOP_VOLATILE.
 
-DEFINE_GETSETOOP(jboolean, Boolean)
-DEFINE_GETSETOOP(jbyte, Byte)
-DEFINE_GETSETOOP(jshort, Short);
-DEFINE_GETSETOOP(jchar, Char);
-DEFINE_GETSETOOP(jint, Int);
-DEFINE_GETSETOOP(jlong, Long);
-DEFINE_GETSETOOP(jfloat, Float);
-DEFINE_GETSETOOP(jdouble, Double);
-
 DEFINE_GETSETOOP_VOLATILE(jboolean, Boolean)
 DEFINE_GETSETOOP_VOLATILE(jbyte, Byte)
 DEFINE_GETSETOOP_VOLATILE(jshort, Short);
 DEFINE_GETSETOOP_VOLATILE(jchar, Char);
 DEFINE_GETSETOOP_VOLATILE(jint, Int);
-// no long -- handled specially
 DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
 DEFINE_GETSETOOP_VOLATILE(jdouble, Double);
 
-#undef DEFINE_GETSETOOP
+#if defined(SPARC) || defined(X86)
+// Sparc and X86 have atomic jlong (8 bytes) instructions
+DEFINE_GETSETOOP_VOLATILE(jlong, Long);
+#endif
+
+#undef DEFINE_GETSETOOP_VOLATILE
 
 // The non-intrinsified versions of setOrdered just use setVolatile
 
-UNSAFE_ENTRY(void, Unsafe_SetOrderedInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x)) \
-  UnsafeWrapper("Unsafe_SetOrderedInt"); \
-  SET_FIELD_VOLATILE(obj, offset, jint, x); \
+UNSAFE_ENTRY(void, Unsafe_SetOrderedInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x))
+  UnsafeWrapper("Unsafe_SetOrderedInt");
+  SET_FIELD_VOLATILE(obj, offset, jint, x);
 UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_SetOrderedObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
   UnsafeWrapper("Unsafe_SetOrderedObject");
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
+  void* addr = index_oop_from_field_offset_long(p, offset);
+  OrderAccess::release();
   if (UseCompressedOops) {
-    oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+    oop_store((narrowOop*)addr, x);
   } else {
-    oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+    oop_store((oop*)addr, x);
   }
   OrderAccess::fence();
 UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x))
   UnsafeWrapper("Unsafe_SetOrderedLong");
+#if defined(SPARC) || defined(X86)
+  // Sparc and X86 have atomic jlong (8 bytes) instructions
+  SET_FIELD_VOLATILE(obj, offset, jlong, x);
+#else
+  // Keep old code for platforms which may not have atomic long (8 bytes) instructions
   {
     if (VM_Version::supports_cx8()) {
       SET_FIELD_VOLATILE(obj, offset, jlong, x);
@@ -388,6 +409,7 @@
       *addr = x;
     }
   }
+#endif
 UNSAFE_END
 
 ////// Data in the C heap.
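The volatile accessors now lean on explicit acquire/release primitives instead of bare volatile loads and stores followed by a fence. A minimal sketch of the pairing these macros rely on; flag and data are illustrative fields, not part of this file:

    // Writer thread:
    data = 42;
    OrderAccess::release_store_fence(&flag, 1);  // orders all prior stores first
    // Reader thread:
    if (OrderAccess::load_acquire(&flag)) {      // later loads can't float above
      assert(data == 42, "store to data is visible once flag is seen");
    }

The SPARC/X86 #ifdefs then reuse the same volatile accessors for jlong, since both ISAs can load and store 8 bytes atomically; other platforms keep the supports_cx8() check and the lock-based fallback.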
--- a/src/share/vm/runtime/arguments.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/arguments.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -918,9 +918,7 @@
   } else if (strcmp(key, "sun.java.command") == 0) {
     _java_command = value;
 
-    // don't add this property to the properties exposed to the java application
-    FreeHeap(key);
-    return true;
+    // Record value in Arguments, but let it get passed to Java.
   } else if (strcmp(key, "sun.java.launcher.pid") == 0) {
     // launcher.pid property is private and is processed
     // in process_sun_java_launcher_properties();
@@ -2297,14 +2295,15 @@
     } else if (match_option(option, "-Xoss", &tail)) {
           // HotSpot does not have separate native and Java stacks, ignore silently for compatibility
     // -Xmaxjitcodesize
-    } else if (match_option(option, "-Xmaxjitcodesize", &tail)) {
+    } else if (match_option(option, "-Xmaxjitcodesize", &tail) ||
+               match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) {
       julong long_ReservedCodeCacheSize = 0;
       ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize,
                                             (size_t)InitialCodeCacheSize);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),
-                    "Invalid maximum code cache size: %s\n",
-                    option->optionString);
+                    "Invalid maximum code cache size: %s. Should be greater than InitialCodeCacheSize=%dK\n",
+                    option->optionString, InitialCodeCacheSize/K);
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
@@ -3030,15 +3029,6 @@
     }
     ScavengeRootsInCode = 1;
   }
-#ifdef COMPILER2
-  if (EnableInvokeDynamic && DoEscapeAnalysis) {
-    // TODO: We need to find rules for invokedynamic and EA.  For now,
-    // simply disable EA by default.
-    if (FLAG_IS_DEFAULT(DoEscapeAnalysis)) {
-      DoEscapeAnalysis = false;
-    }
-  }
-#endif
 
   if (PrintGCDetails) {
     // Turn on -verbose:gc options as well
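-XX:ReservedCodeCacheSize= now funnels through the same parser as the legacy -Xmaxjitcodesize, so both get range checking against InitialCodeCacheSize and the sharper diagnostic. Equivalent invocations (the size is illustrative):

    java -Xmaxjitcodesize128m ...
    java -XX:ReservedCodeCacheSize=128m ...

A value that does not clear InitialCodeCacheSize is rejected with the new message, which names the violated bound instead of only echoing the option.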
--- a/src/share/vm/runtime/deoptimization.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/deoptimization.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -395,8 +395,8 @@
   {
     HandleMark hm;
     methodHandle method(thread, array->element(0)->method());
-    Bytecode_invoke* invoke = Bytecode_invoke_at_check(method, array->element(0)->bci());
-    return_type = (invoke != NULL) ? invoke->result_type(thread) : T_ILLEGAL;
+    Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
+    return_type = invoke.is_valid() ? invoke.result_type(thread) : T_ILLEGAL;
   }
 
   // Compute information for handling adapters and adjusting the frame size of the caller.
@@ -600,8 +600,8 @@
           cur_code == Bytecodes::_invokespecial ||
           cur_code == Bytecodes::_invokestatic  ||
           cur_code == Bytecodes::_invokeinterface) {
-        Bytecode_invoke* invoke = Bytecode_invoke_at(mh, iframe->interpreter_frame_bci());
-        symbolHandle signature(thread, invoke->signature());
+        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
+        symbolHandle signature(thread, invoke.signature());
         ArgumentSizeComputer asc(signature);
         cur_invoke_parameter_size = asc.size();
         if (cur_code != Bytecodes::_invokestatic) {
@@ -963,7 +963,7 @@
       if (bci == SynchronizationEntryBCI) {
         code_name = "sync entry";
       } else {
-        Bytecodes::Code code = Bytecodes::code_at(vf->method(), bci);
+        Bytecodes::Code code = vf->method()->code_at(bci);
         code_name = Bytecodes::name(code);
       }
       tty->print(" - %s", code_name);
@@ -1224,7 +1224,7 @@
     ScopeDesc*      trap_scope  = cvf->scope();
     methodHandle    trap_method = trap_scope->method();
     int             trap_bci    = trap_scope->bci();
-    Bytecodes::Code trap_bc     = Bytecode_at(trap_method->bcp_from(trap_bci))->java_code();
+    Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
 
     // Record this event in the histogram.
     gather_statistics(reason, action, trap_bc);
--- a/src/share/vm/runtime/frame.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/frame.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,8 @@
 #include "runtime/signature.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "utilities/decoder.hpp"
+
 #ifdef TARGET_ARCH_x86
 # include "nativeInst_x86.hpp"
 #endif
@@ -652,7 +654,7 @@
   // names if pc is within jvm.dll or libjvm.so, because JVM only has
   // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this
   // only for native libraries.
-  if (!in_vm) {
+  if (!in_vm || Decoder::can_decode_C_frame_in_vm()) {
     found = os::dll_address_to_function_name(pc, buf, buflen, &offset);
 
     if (found) {
@@ -928,10 +930,10 @@
   // This is used sometimes for calling into the VM, not for another
   // interpreted or compiled frame.
   if (!m->is_native()) {
-    Bytecode_invoke *call = Bytecode_invoke_at_check(m, bci);
-    if (call != NULL) {
-      signature = symbolHandle(thread, call->signature());
-      has_receiver = call->has_receiver();
+    Bytecode_invoke call = Bytecode_invoke_check(m, bci);
+    if (call.is_valid()) {
+      signature = symbolHandle(thread, call.signature());
+      has_receiver = call.has_receiver();
       if (map->include_argument_oops() &&
           interpreter_frame_expression_stack_size() > 0) {
         ResourceMark rm(thread);  // is this right ???
@@ -1071,28 +1073,20 @@
   }
 }
 
-BasicLock* frame::compiled_synchronized_native_monitor(nmethod* nm) {
-  if (nm == NULL) {
-    assert(_cb != NULL && _cb->is_nmethod() &&
-           nm->method()->is_native() &&
-           nm->method()->is_synchronized(),
-           "should not call this otherwise");
-    nm = (nmethod*) _cb;
-  }
-  int byte_offset = in_bytes(nm->compiled_synchronized_native_basic_lock_sp_offset());
+BasicLock* frame::get_native_monitor() {
+  nmethod* nm = (nmethod*)_cb;
+  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
+         "Should not call this unless it's a native nmethod");
+  int byte_offset = in_bytes(nm->native_basic_lock_sp_offset());
   assert(byte_offset >= 0, "should not see invalid offset");
   return (BasicLock*) &sp()[byte_offset / wordSize];
 }
 
-oop frame::compiled_synchronized_native_monitor_owner(nmethod* nm) {
-  if (nm == NULL) {
-    assert(_cb != NULL && _cb->is_nmethod() &&
-           nm->method()->is_native() &&
-           nm->method()->is_synchronized(),
-           "should not call this otherwise");
-    nm = (nmethod*) _cb;
-  }
-  int byte_offset = in_bytes(nm->compiled_synchronized_native_basic_lock_owner_sp_offset());
+oop frame::get_native_receiver() {
+  nmethod* nm = (nmethod*)_cb;
+  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
+         "Should not call this unless it's a native nmethod");
+  int byte_offset = in_bytes(nm->native_receiver_sp_offset());
   assert(byte_offset >= 0, "should not see invalid offset");
   oop owner = ((oop*) sp())[byte_offset / wordSize];
   assert( Universe::heap()->is_in(owner), "bad receiver" );
--- a/src/share/vm/runtime/frame.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/frame.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -254,10 +254,10 @@
 
   // Return the monitor owner and BasicLock for compiled synchronized
   // native methods so that biased locking can revoke the receiver's
-  // bias if necessary. Takes optional nmethod for this frame as
-  // argument to avoid performing repeated lookups in code cache.
-  BasicLock* compiled_synchronized_native_monitor      (nmethod* nm = NULL);
-  oop        compiled_synchronized_native_monitor_owner(nmethod* nm = NULL);
+  // bias if necessary.  This is also used by JVMTI's GetLocalInstance method
+  // (via VM_GetReceiver) to retrieve the receiver from a native wrapper frame.
+  BasicLock* get_native_monitor();
+  oop        get_native_receiver();
 
   // Find receiver for an invoke when arguments are just pushed on stack (i.e., callee stack-frame is
   // not setup)
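This comment pairs with the frame.cpp hunk above: the optional nmethod argument and the is_synchronized() assertion are gone because JVMTI's VM_GetReceiver now uses the same accessors on native wrapper frames that need not be synchronized. The vframe_hp.cpp hunk below shows the surviving call shape:

    // Both values are read straight off the native wrapper frame.
    MonitorInfo* info = new MonitorInfo(
        fr.get_native_receiver(), fr.get_native_monitor(), false, false);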
--- a/src/share/vm/runtime/globals.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/globals.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1198,9 +1198,6 @@
   product(ccstr, TraceJVMTI, NULL,                                          \
           "Trace flags for JVMTI functions and events")                     \
                                                                             \
-  product(bool, ForceFullGCJVMTIEpilogues, false,                           \
-          "Force 'Full GC' was done semantics for JVMTI GC epilogues")      \
-                                                                            \
   /* This option can change an EMCP method into an obsolete method. */      \
  /* This can affect tests that expect specific methods to be EMCP. */      \
   /* This option should be used with caution. */                            \
--- a/src/share/vm/runtime/java.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/java.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -515,8 +515,8 @@
 }
 
 void vm_exit(int code) {
-  Thread* thread = ThreadLocalStorage::thread_index() == -1 ? NULL
-    : ThreadLocalStorage::get_thread_slow();
+  Thread* thread = ThreadLocalStorage::is_initialized() ?
+    ThreadLocalStorage::get_thread_slow() : NULL;
   if (thread == NULL) {
     // we have serious problems -- just exit
     vm_direct_exit(code);
@@ -553,8 +553,9 @@
   // Calling 'exit_globals()' will disable thread-local-storage and cause all
   // kinds of assertions to trigger in debug mode.
   if (is_init_completed()) {
-    Thread* thread = Thread::current();
-    if (thread->is_Java_thread()) {
+    Thread* thread = ThreadLocalStorage::is_initialized() ?
+                     ThreadLocalStorage::get_thread_slow() : NULL;
+    if (thread != NULL && thread->is_Java_thread()) {
       // We are leaving the VM, set state to native (in case any OS exit
       // handlers call back to the VM)
       JavaThread* jt = (JavaThread*)thread;
@@ -662,7 +663,8 @@
     }
     _current = JDK_Version(major, minor, micro, info.update_version,
                            info.special_update_version, build,
-                           info.thread_park_blocker == 1);
+                           info.thread_park_blocker == 1,
+                           info.post_vm_init_hook_enabled == 1);
   }
 }
 
--- a/src/share/vm/runtime/java.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/java.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -92,6 +92,7 @@
   bool _partially_initialized;
 
   bool _thread_park_blocker;
+  bool _post_vm_init_hook_enabled;
 
   bool is_valid() const {
     return (_major != 0 || _partially_initialized);
@@ -113,14 +114,15 @@
 
   JDK_Version() : _major(0), _minor(0), _micro(0), _update(0),
                   _special(0), _build(0), _partially_initialized(false),
-                  _thread_park_blocker(false) {}
+                  _thread_park_blocker(false), _post_vm_init_hook_enabled(false) {}
 
   JDK_Version(uint8_t major, uint8_t minor = 0, uint8_t micro = 0,
               uint8_t update = 0, uint8_t special = 0, uint8_t build = 0,
-              bool thread_park_blocker = false) :
+              bool thread_park_blocker = false, bool post_vm_init_hook_enabled = false) :
       _major(major), _minor(minor), _micro(micro), _update(update),
       _special(special), _build(build), _partially_initialized(false),
-      _thread_park_blocker(thread_park_blocker) {}
+      _thread_park_blocker(thread_park_blocker),
+      _post_vm_init_hook_enabled(post_vm_init_hook_enabled) {}
 
   // Returns the current running JDK version
   static JDK_Version current() { return _current; }
@@ -144,6 +146,9 @@
   bool supports_thread_park_blocker() const {
     return _thread_park_blocker;
   }
+  bool post_vm_init_hook_enabled() const {
+    return _post_vm_init_hook_enabled;
+  }
 
   // Performs a full ordering comparison using all fields (update, build, etc.)
   int compare(const JDK_Version& other) const;
--- a/src/share/vm/runtime/jniHandles.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/jniHandles.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -428,6 +429,12 @@
       break;
     }
   }
+
+  /*
+   * JVMTI data structures may also contain weak oops.  The iteration of them
+   * is placed here so that we don't need to add it to each of the collectors.
+   */
+  JvmtiExport::weak_oops_do(is_alive, f);
 }
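With this hook in place, every collector that already walks JNI weak handles picks up the JVMTI tag maps for free; the per-collector gc_epilogue()/cms_ref_processing_epilogue() calls deleted from jvmtiTagMap.hpp earlier in this changeset need no replacement. Collector side, as a sketch (closure names illustrative):

    // One call now covers JNI weak globals and JVMTI tag-map entries alike.
    JNIHandles::weak_oops_do(&is_alive_closure, &keep_alive_closure);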
 
 
--- a/src/share/vm/runtime/mutexLocker.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/mutexLocker.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,8 +80,6 @@
 Monitor* iCMS_lock                    = NULL;
 Monitor* FullGCCount_lock             = NULL;
 Monitor* CMark_lock                   = NULL;
-Monitor* ZF_mon                       = NULL;
-Monitor* Cleanup_mon                  = NULL;
 Mutex*   CMRegionStack_lock           = NULL;
 Mutex*   SATB_Q_FL_lock               = NULL;
 Monitor* SATB_Q_CBL_mon               = NULL;
@@ -122,6 +120,9 @@
 Mutex*   PerfDataManager_lock         = NULL;
 Mutex*   OopMapCacheAlloc_lock        = NULL;
 
+Mutex*   FreeList_lock                = NULL;
+Monitor* SecondaryFreeList_lock       = NULL;
+Mutex*   OldSets_lock                 = NULL;
 Mutex*   MMUTracker_lock              = NULL;
 Mutex*   HotCardCache_lock            = NULL;
 
@@ -177,8 +178,6 @@
   }
   if (UseG1GC) {
     def(CMark_lock                 , Monitor, nonleaf,     true ); // coordinate concurrent mark thread
-    def(ZF_mon                     , Monitor, leaf,        true );
-    def(Cleanup_mon                , Monitor, nonleaf,     true );
     def(CMRegionStack_lock         , Mutex,   leaf,        true );
     def(SATB_Q_FL_lock             , Mutex  , special,     true );
     def(SATB_Q_CBL_mon             , Monitor, nonleaf,     true );
@@ -188,6 +187,9 @@
     def(DirtyCardQ_CBL_mon         , Monitor, nonleaf,     true );
     def(Shared_DirtyCardQ_lock     , Mutex,   nonleaf,     true );
 
+    def(FreeList_lock              , Mutex,   leaf     ,   true );
+    def(SecondaryFreeList_lock     , Monitor, leaf     ,   true );
+    def(OldSets_lock               , Mutex  , leaf     ,   true );
     def(MMUTracker_lock            , Mutex  , leaf     ,   true );
     def(HotCardCache_lock          , Mutex  , special  ,   true );
     def(EvacFailureStack_lock      , Mutex  , nonleaf  ,   true );
--- a/src/share/vm/runtime/mutexLocker.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/mutexLocker.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,8 +76,6 @@
 extern Monitor* iCMS_lock;                       // CMS incremental mode start/stop notification
 extern Monitor* FullGCCount_lock;                // in support of "concurrent" full gc
 extern Monitor* CMark_lock;                      // used for concurrent mark thread coordination
-extern Monitor* ZF_mon;                          // used for G1 conc zero-fill.
-extern Monitor* Cleanup_mon;                     // used for G1 conc cleanup.
 extern Mutex*   CMRegionStack_lock;              // used for protecting accesses to the CM region stack
 extern Mutex*   SATB_Q_FL_lock;                  // Protects SATB Q
                                                  // buffer free list.
@@ -125,6 +123,9 @@
 extern Mutex*   ParkerFreeList_lock;
 extern Mutex*   OopMapCacheAlloc_lock;           // protects allocation of oop_map caches
 
+extern Mutex*   FreeList_lock;                   // protects the free region list during safepoints
+extern Monitor* SecondaryFreeList_lock;          // protects the secondary free region list
+extern Mutex*   OldSets_lock;                    // protects the old region sets
 extern Mutex*   MMUTracker_lock;                 // protects the MMU
                                                  // tracker data structures
 extern Mutex*   HotCardCache_lock;               // protects the hot card cache
--- a/src/share/vm/runtime/relocator.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/relocator.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -106,7 +106,7 @@
   // get the address of in the code_array
   inline char* addr_at(int bci) const             { return (char*) &code_array()[bci]; }
 
-  int  instruction_length_at(int bci)             { return Bytecodes::length_at(code_array() + bci); }
+  int  instruction_length_at(int bci)             { return Bytecodes::length_at(NULL, code_array() + bci); }
 
   // Helper methods
   int  align(int n) const                          { return (n+3) & ~3; }
--- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -944,9 +944,9 @@
   int          bci    = vfst.bci();
 
   // Find bytecode
-  Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
-  bc = bytecode->java_code();
-  int bytecode_index = bytecode->index();
+  Bytecode_invoke bytecode(caller, bci);
+  bc = bytecode.java_code();
+  int bytecode_index = bytecode.index();
 
   // Find receiver for non-static call
   if (bc != Bytecodes::_invokestatic) {
@@ -957,7 +957,7 @@
     // Caller-frame is a compiled frame
     frame callerFrame = stubFrame.sender(&reg_map2);
 
-    methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
+    methodHandle callee = bytecode.static_target(CHECK_(nullHandle));
     if (callee.is_null()) {
       THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
     }
@@ -1674,10 +1674,9 @@
   // Get target class name from the checkcast instruction
   vframeStream vfst(thread, true);
   assert(!vfst.at_end(), "Java frame must exist");
-  Bytecode_checkcast* cc = Bytecode_checkcast_at(
-    vfst.method()->bcp_from(vfst.bci()));
+  Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
   Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
-    cc->index(), thread));
+    cc.index(), thread));
   return generate_class_cast_message(objName, targetKlass->external_name());
 }
 
@@ -1711,11 +1710,11 @@
     const char* targetType = "the required signature";
     vframeStream vfst(thread, true);
     if (!vfst.at_end()) {
-      Bytecode_invoke* call = Bytecode_invoke_at(vfst.method(), vfst.bci());
+      Bytecode_invoke call(vfst.method(), vfst.bci());
       methodHandle target;
       {
         EXCEPTION_MARK;
-        target = call->static_target(THREAD);
+        target = call.static_target(THREAD);
         if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; }
       }
       if (target.not_null()
--- a/src/share/vm/runtime/thread.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/thread.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
 #include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/linkResolver.hpp"
+#include "jvmtifiles/jvmtiEnv.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/instanceKlass.hpp"
@@ -177,20 +178,19 @@
 
 
 Thread::Thread() {
-  // stack
-  _stack_base   = NULL;
-  _stack_size   = 0;
-  _self_raw_id  = 0;
-  _lgrp_id      = -1;
-  _osthread     = NULL;
+  // stack and get_thread
+  set_stack_base(NULL);
+  set_stack_size(0);
+  set_self_raw_id(0);
+  set_lgrp_id(-1);
 
   // allocated data structures
+  set_osthread(NULL);
   set_resource_area(new ResourceArea());
   set_handle_area(new HandleArea(NULL));
   set_active_handles(NULL);
   set_free_handle_block(NULL);
   set_last_handle_mark(NULL);
-  set_osthread(NULL);
 
   // This initial value ==> never claimed.
   _oops_do_parity = 0;
@@ -205,6 +205,7 @@
   NOT_PRODUCT(_skip_gcalot = false;)
   CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
   _jvmti_env_iteration_count = 0;
+  set_allocated_bytes(0);
   _vm_operation_started_count = 0;
   _vm_operation_completed_count = 0;
   _current_pending_monitor = NULL;
@@ -977,6 +978,19 @@
 }
 #endif // KERNEL
 
+// General purpose hook into Java code, run once when the VM is initialized.
+// The Java library method itself may be changed independently from the VM.
+static void call_postVMInitHook(TRAPS) {
+  klassOop k = SystemDictionary::sun_misc_PostVMInitHook_klass();
+  instanceKlassHandle klass (THREAD, k);
+  if (klass.not_null()) {
+    JavaValue result(T_VOID);
+    JavaCalls::call_static(&result, klass, vmSymbolHandles::run_method_name(),
+                                           vmSymbolHandles::void_method_signature(),
+                                           CHECK);
+  }
+}
+
 static void reset_vm_info_property(TRAPS) {
   // the vm info string
   ResourceMark rm(THREAD);
@@ -1699,7 +1713,7 @@
     tlab().make_parsable(true);  // retire TLAB
   }
 
-  if (jvmti_thread_state() != NULL) {
+  if (JvmtiEnv::environments_might_exist()) {
     JvmtiExport::cleanup_thread(this);
   }
 
@@ -3231,7 +3245,7 @@
       warning("java.lang.ArithmeticException has not been initialized");
       warning("java.lang.StackOverflowError has not been initialized");
     }
-  }
+    }
 
   // See        : bugid 4211085.
   // Background : the static initializer of java.lang.Compiler tries to read
@@ -3345,6 +3359,14 @@
 
   BiasedLocking::init();
 
+  if (JDK_Version::current().post_vm_init_hook_enabled()) {
+    call_postVMInitHook(THREAD);
+    // The Java side of PostVMInitHook.run must deal with all
+    // exceptions and provide means of diagnosis.
+    if (HAS_PENDING_EXCEPTION) {
+      CLEAR_PENDING_EXCEPTION;
+    }
+  }
 
   // Start up the WatcherThread if there are any periodic tasks
   // NOTE:  All PeriodicTasks should be registered by now. If they
--- a/src/share/vm/runtime/thread.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/thread.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 class JvmtiGetLoadedClassesClosure;
 class ThreadStatistics;
 class ConcurrentLocksDump;
-class ParkEvent ;
+class ParkEvent;
 class Parker;
 
 class ciEnv;
@@ -170,7 +170,7 @@
   //
 
   // suspend/resume lock: used for self-suspend
-  Monitor*    _SR_lock;
+  Monitor* _SR_lock;
 
  protected:
   enum SuspendFlags {
@@ -194,7 +194,7 @@
  public:
   void enter_signal_handler() { _num_nested_signal++; }
   void leave_signal_handler() { _num_nested_signal--; }
-  bool is_inside_signal_handler() const  { return _num_nested_signal > 0; }
+  bool is_inside_signal_handler() const { return _num_nested_signal > 0; }
 
  private:
   // Debug tracing
@@ -215,7 +215,7 @@
 
   public:
    void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
-    HandleMark* last_handle_mark() const          { return _last_handle_mark; }
+   HandleMark* last_handle_mark() const          { return _last_handle_mark; }
   private:
 
   // debug support for checking if code does allow safepoints or not
@@ -227,11 +227,11 @@
   //
   // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
   //
-  NOT_PRODUCT(int _allow_safepoint_count;)       // If 0, thread allow a safepoint to happen
-  debug_only (int _allow_allocation_count;)      // If 0, the thread is allowed to allocate oops.
+  NOT_PRODUCT(int _allow_safepoint_count;)      // If 0, thread allow a safepoint to happen
+  debug_only (int _allow_allocation_count;)     // If 0, the thread is allowed to allocate oops.
 
   // Used by SkipGCALot class.
-  NOT_PRODUCT(bool _skip_gcalot;)                // Should we elide gc-a-lot?
+  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?
 
   // Record when GC is locked out via the GC_locker mechanism
   CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
@@ -242,24 +242,26 @@
   friend class ThreadLocalStorage;
   friend class GC_locker;
 
-  ThreadLocalAllocBuffer _tlab;                  // Thread-local eden
+  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
+  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
+                                                // the Java heap
 
-  int   _vm_operation_started_count;             // VM_Operation support
-  int   _vm_operation_completed_count;           // VM_Operation support
+  int   _vm_operation_started_count;            // VM_Operation support
+  int   _vm_operation_completed_count;          // VM_Operation support
 
-  ObjectMonitor* _current_pending_monitor;       // ObjectMonitor this thread
-                                                 // is waiting to lock
-  bool _current_pending_monitor_is_from_java;    // locking is from Java code
+  ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
+                                                // is waiting to lock
+  bool _current_pending_monitor_is_from_java;   // locking is from Java code
 
   // ObjectMonitor on which this thread called Object.wait()
   ObjectMonitor* _current_waiting_monitor;
 
   // Private thread-local objectmonitor list - a simple cache organized as a SLL.
  public:
-  ObjectMonitor * omFreeList ;
-  int omFreeCount ;                             // length of omFreeList
-  int omFreeProvision ;                         // reload chunk size
-  ObjectMonitor * omInUseList;                  // SLL to track monitors in circulation
+  ObjectMonitor* omFreeList;
+  int omFreeCount;                              // length of omFreeList
+  int omFreeProvision;                          // reload chunk size
+  ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
   int omInUseCount;                             // length of omInUseList
 
  public:
@@ -280,7 +282,6 @@
   // Testers
   virtual bool is_VM_thread()       const            { return false; }
   virtual bool is_Java_thread()     const            { return false; }
-  // Remove this ifdef when C1 is ported to the compiler interface.
   virtual bool is_Compiler_thread() const            { return false; }
   virtual bool is_hidden_from_external_view() const  { return false; }
   virtual bool is_jvmti_agent_thread() const         { return false; }
@@ -344,15 +345,15 @@
   // Support for Unhandled Oop detection
 #ifdef CHECK_UNHANDLED_OOPS
  private:
-  UnhandledOops *_unhandled_oops;
+  UnhandledOops* _unhandled_oops;
  public:
-  UnhandledOops* unhandled_oops()               { return _unhandled_oops; }
+  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
   // Mark oop safe for gc.  It may be stack allocated but won't move.
-  void allow_unhandled_oop(oop *op)              {
+  void allow_unhandled_oop(oop *op) {
     if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
   }
   // Clear oops at safepoint so crashes point to unhandled oop violator
-  void clear_unhandled_oops()                   {
+  void clear_unhandled_oops() {
     if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
   }
   bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
@@ -392,6 +393,22 @@
     }
   }
 
+  jlong allocated_bytes()               { return _allocated_bytes; }
+  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
+  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
+  jlong cooked_allocated_bytes() {
+    jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
+    if (UseTLAB) {
+      size_t used_bytes = tlab().used_bytes();
+      if ((ssize_t)used_bytes > 0) {
+        // More-or-less valid tlab.  The load_acquire above should ensure
+        // that the result of the add is <= the instantaneous value
+        return allocated_bytes + used_bytes;
+      }
+    }
+    return allocated_bytes;
+  }
+
   // VM operation support
   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
@@ -489,8 +506,11 @@
     return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
   }
 
-  int     lgrp_id() const                 { return _lgrp_id; }
-  void    set_lgrp_id(int value)          { _lgrp_id = value; }
+  uintptr_t self_raw_id()                    { return _self_raw_id; }
+  void      set_self_raw_id(uintptr_t value) { _self_raw_id = value; }
+
+  int     lgrp_id() const        { return _lgrp_id; }
+  void    set_lgrp_id(int value) { _lgrp_id = value; }
 
   // Printing
   void print_on(outputStream* st) const;
@@ -502,7 +522,7 @@
 #ifdef ASSERT
  private:
   // Deadlock detection support for Mutex locks. List of locks own by thread.
-  Monitor *_owned_locks;
+  Monitor* _owned_locks;
   // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
   // thus the friendship
   friend class Mutex;
@@ -511,7 +531,7 @@
  public:
   void print_owned_locks_on(outputStream* st) const;
   void print_owned_locks() const                 { print_owned_locks_on(tty);    }
-  Monitor * owned_locks() const                  { return _owned_locks;          }
+  Monitor* owned_locks() const                   { return _owned_locks;          }
   bool owns_locks() const                        { return owned_locks() != NULL; }
   bool owns_locks_but_compiled_lock() const;
 
@@ -538,7 +558,7 @@
   static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size ); }
 
 #define TLAB_FIELD_OFFSET(name) \
-  static ByteSize tlab_##name##_offset()            { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
+  static ByteSize tlab_##name##_offset()         { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
 
   TLAB_FIELD_OFFSET(start)
   TLAB_FIELD_OFFSET(end)
@@ -552,6 +572,8 @@
 
 #undef TLAB_FIELD_OFFSET
 
+  static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes ); }
+
  public:
   volatile intptr_t _Stalled ;
   volatile int _TypeTag ;
@@ -787,7 +809,7 @@
   //
   // _vm_exited is a special value to cover the case of a JavaThread
   // executing native code after the VM itself is terminated.
-  TerminatedTypes       _terminated;
+  volatile TerminatedTypes _terminated;
   // suspend/resume support
   volatile bool         _suspend_equivalent;     // Suspend equivalent condition
   jint                  _in_deopt_handler;       // count of deoptimization
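The new counter is deliberately cheap: a thread bumps its own _allocated_bytes only when a TLAB is retired, so a raw read of the field can lag by up to one TLAB. cooked_allocated_bytes() folds the live TLAB back in; from a monitoring thread's point of view (t is an illustrative Thread*):

    jlong bytes = t->cooked_allocated_bytes();
    // == load_acquire(&_allocated_bytes) + tlab().used_bytes() while a TLAB
    //    is active; the acquire keeps the sum from overshooting the true
    //    total if the target thread retires its TLAB concurrently.

jmm.h and management.cpp below expose the value through GetThreadAllocatedMemory.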
--- a/src/share/vm/runtime/vframeArray.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/vframeArray.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -399,7 +399,7 @@
   } else if (TraceDeoptimization) {
     tty->print("     ");
     method()->print_value();
-    Bytecodes::Code code = Bytecodes::java_code_at(bcp);
+    Bytecodes::Code code = Bytecodes::java_code_at(method(), bcp);
     int bci = method()->bci_from(bcp);
     tty->print(" - %s", Bytecodes::name(code));
     tty->print(" @ bci %d ", bci);
--- a/src/share/vm/runtime/vframe_hp.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/vframe_hp.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -207,8 +207,8 @@
     GrowableArray<MonitorInfo*> *monitors = new GrowableArray<MonitorInfo*>(1);
     // Casting away const
     frame& fr = (frame&) _fr;
-    MonitorInfo* info = new MonitorInfo(fr.compiled_synchronized_native_monitor_owner(nm),
-                                        fr.compiled_synchronized_native_monitor(nm), false, false);
+    MonitorInfo* info = new MonitorInfo(
+        fr.get_native_receiver(), fr.get_native_monitor(), false, false);
     monitors->push(info);
     return monitors;
   }
--- a/src/share/vm/runtime/vmStructs.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/runtime/vmStructs.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,8 +219,8 @@
   volatile_nonstatic_field(oopDesc,            _metadata._compressed_klass,                   narrowOop)                             \
      static_field(oopDesc,                     _bs,                                           BarrierSet*)                           \
   nonstatic_field(arrayKlass,                  _dimension,                                    int)                                   \
-  nonstatic_field(arrayKlass,                  _higher_dimension,                             klassOop)                              \
-  nonstatic_field(arrayKlass,                  _lower_dimension,                              klassOop)                              \
+  volatile_nonstatic_field(arrayKlass,         _higher_dimension,                             klassOop)                              \
+  volatile_nonstatic_field(arrayKlass,         _lower_dimension,                              klassOop)                              \
   nonstatic_field(arrayKlass,                  _vtable_len,                                   int)                                   \
   nonstatic_field(arrayKlass,                  _alloc_size,                                   juint)                                 \
   nonstatic_field(arrayKlass,                  _component_mirror,                             oop)                                   \
--- a/src/share/vm/services/heapDumper.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/services/heapDumper.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -453,7 +453,7 @@
 
 DumpWriter::~DumpWriter() {
   // flush and close dump file
-  if (file_descriptor() >= 0) {
+  if (is_open()) {
     close();
   }
   if (_buffer != NULL) os::free(_buffer);
@@ -463,9 +463,10 @@
 // closes dump file (if open)
 void DumpWriter::close() {
   // flush and close dump file
-  if (file_descriptor() >= 0) {
+  if (is_open()) {
     flush();
     ::close(file_descriptor());
+    set_file_descriptor(-1);
   }
 }
 
@@ -1935,18 +1936,32 @@
 void HeapDumper::dump_heap(bool oome) {
   static char base_path[JVM_MAXPATHLEN] = {'\0'};
   static uint dump_file_seq = 0;
-  char   my_path[JVM_MAXPATHLEN] = {'\0'};
+  char* my_path;
+  const int max_digit_chars = 20;
+
+  const char* dump_file_name = "java_pid";
+  const char* dump_file_ext  = ".hprof";
 
   // The dump file defaults to java_pid<pid>.hprof in the current working
   // directory. HeapDumpPath=<file> can be used to specify an alternative
   // dump file name or a directory where dump file is created.
   if (dump_file_seq == 0) { // first time in, we initialize base_path
+    // Calculate potentially longest base path and check if we have enough
+    // allocated statically.
+    const size_t total_length =
+                      (HeapDumpPath == NULL ? 0 : strlen(HeapDumpPath)) +
+                      strlen(os::file_separator()) + max_digit_chars +
+                      strlen(dump_file_name) + strlen(dump_file_ext) + 1;
+    if (total_length > sizeof(base_path)) {
+      warning("Cannot create heap dump file.  HeapDumpPath is too long.");
+      return;
+    }
+
     bool use_default_filename = true;
     if (HeapDumpPath == NULL || HeapDumpPath[0] == '\0') {
       // HeapDumpPath=<file> not specified
     } else {
-      assert(strlen(HeapDumpPath) < sizeof(base_path), "HeapDumpPath too long");
-      strcpy(base_path, HeapDumpPath);
+      strncpy(base_path, HeapDumpPath, sizeof(base_path));
       // check if the path is a directory (must exist)
       DIR* dir = os::opendir(base_path);
       if (dir == NULL) {
@@ -1960,8 +1975,6 @@
           char* end = base_path;
           end += (strlen(base_path) - fs_len);
           if (strcmp(end, os::file_separator()) != 0) {
-            assert(strlen(base_path) + strlen(os::file_separator()) < sizeof(base_path),
-              "HeapDumpPath too long");
             strcat(base_path, os::file_separator());
           }
         }
@@ -1969,21 +1982,26 @@
     }
     // If HeapDumpPath wasn't a file name then we append the default name
     if (use_default_filename) {
-      char fn[32];
-      sprintf(fn, "java_pid%d", os::current_process_id());
-      assert(strlen(base_path) + strlen(fn) + strlen(".hprof") < sizeof(base_path), "HeapDumpPath too long");
-      strcat(base_path, fn);
-      strcat(base_path, ".hprof");
+      const size_t dlen = strlen(base_path);  // if heap dump dir specified
+      jio_snprintf(&base_path[dlen], sizeof(base_path)-dlen, "%s%d%s",
+                   dump_file_name, os::current_process_id(), dump_file_ext);
     }
-    assert(strlen(base_path) < sizeof(my_path), "Buffer too small");
-    strcpy(my_path, base_path);
+    const size_t len = strlen(base_path) + 1;
+    my_path = (char*)os::malloc(len);
+    if (my_path == NULL) {
+      warning("Cannot create heap dump file.  Out of system memory.");
+      return;
+    }
+    strncpy(my_path, base_path, len);
   } else {
     // Append a sequence number id for dumps following the first
-    char fn[33];
-    sprintf(fn, ".%d", dump_file_seq);
-    assert(strlen(base_path) + strlen(fn) < sizeof(my_path), "HeapDumpPath too long");
-    strcpy(my_path, base_path);
-    strcat(my_path, fn);
+    const size_t len = strlen(base_path) + max_digit_chars + 2; // for '.' and \0
+    my_path = (char*)os::malloc(len);
+    if (my_path == NULL) {
+      warning("Cannot create heap dump file.  Out of system memory.");
+      return;
+    }
+    jio_snprintf(my_path, len, "%s.%d", base_path, dump_file_seq);
   }
   dump_file_seq++;   // increment seq number for next time we dump
 
@@ -1991,4 +2009,5 @@
                     true  /* send to tty */,
                     oome  /* pass along out-of-memory-error flag */);
   dumper.dump(my_path);
+  os::free(my_path);
 }
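The rewrite trades assert-guarded strcpy/strcat, whose checks vanish in product builds and leave real overflows behind, for an up-front worst-case length check plus bounded formatting. The shape of the pattern, with illustrative names:

    // Measure the worst case first, refuse oversized input, format bounded.
    const size_t needed = strlen(dir) + max_digit_chars
                        + strlen(name) + strlen(ext) + 1;
    if (needed > sizeof(buf)) { warning("path too long"); return; }
    jio_snprintf(buf, sizeof(buf), "%s%s%d%s", dir, name, pid, ext);

Switching my_path to os::malloc/os::free also means the per-dump file name no longer has to fit a fixed stack buffer at all.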
--- a/src/share/vm/services/jmm.h	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/services/jmm.h	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,8 @@
   unsigned int isBootClassPathSupported : 1;
   unsigned int isObjectMonitorUsageSupported : 1;
   unsigned int isSynchronizerUsageSupported : 1;
-  unsigned int : 24;
+  unsigned int isThreadAllocatedMemorySupported : 1;
+  unsigned int : 23;
 } jmmOptionalSupport;
 
 typedef enum {
@@ -105,7 +106,8 @@
   JMM_VERBOSE_GC                     = 21,
   JMM_VERBOSE_CLASS                  = 22,
   JMM_THREAD_CONTENTION_MONITORING   = 23,
-  JMM_THREAD_CPU_TIME                = 24
+  JMM_THREAD_CPU_TIME                = 24,
+  JMM_THREAD_ALLOCATED_MEMORY        = 25
 } jmmBoolAttribute;
 
 
@@ -213,7 +215,10 @@
   jobject      (JNICALL *GetMemoryPoolUsage)     (JNIEnv* env, jobject pool);
   jobject      (JNICALL *GetPeakMemoryPoolUsage) (JNIEnv* env, jobject pool);
 
-  void*        reserved4;
+  void         (JNICALL *GetThreadAllocatedMemory)
+                                                 (JNIEnv *env,
+                                                  jlongArray ids,
+                                                  jlongArray sizeArray);
 
   jobject      (JNICALL *GetMemoryUsage)         (JNIEnv* env, jboolean heap);
 
@@ -228,6 +233,8 @@
                                                   jlong* result);
 
   jobjectArray (JNICALL *FindCircularBlockedThreads) (JNIEnv *env);
+
+  // Not used in JDK 6 or JDK 7
   jlong        (JNICALL *GetThreadCpuTime)       (JNIEnv *env, jlong thread_id);
 
   jobjectArray (JNICALL *GetVMGlobalNames)       (JNIEnv *env);
@@ -262,14 +269,22 @@
   void         (JNICALL *GetLastGCStat)          (JNIEnv *env,
                                                   jobject mgr,
                                                   jmmGCStat *gc_stat);
-  jlong        (JNICALL *GetThreadCpuTimeWithKind) (JNIEnv *env,
-                                                    jlong thread_id,
-                                                    jboolean user_sys_cpu_time);
-  void*        reserved5;
+
+  jlong        (JNICALL *GetThreadCpuTimeWithKind)
+                                                 (JNIEnv *env,
+                                                  jlong thread_id,
+                                                  jboolean user_sys_cpu_time);
+  void         (JNICALL *GetThreadCpuTimesWithKind)
+                                                 (JNIEnv *env,
+                                                  jlongArray ids,
+                                                  jlongArray timeArray,
+                                                  jboolean user_sys_cpu_time);
+
   jint         (JNICALL *DumpHeap0)              (JNIEnv *env,
                                                   jstring outputfile,
                                                   jboolean live);
-  jobjectArray (JNICALL *FindDeadlocks)             (JNIEnv *env, jboolean object_monitors_only);
+  jobjectArray (JNICALL *FindDeadlocks)          (JNIEnv *env,
+                                                  jboolean object_monitors_only);
   void         (JNICALL *SetVMGlobal)            (JNIEnv *env,
                                                   jstring flag_name,
                                                   jvalue  new_value);
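
The two jmm.h hunks above are meant to be used together: a client first tests the
new isThreadAllocatedMemorySupported capability bit (carved out of the unnamed
padding bits, so the struct size is unchanged) and only then calls the
GetThreadAllocatedMemory slot that replaces reserved4. A minimal consumer-side
sketch, assuming the JmmInterface typedef, the JVM_GetManagement entry point and
the GetOptionalSupport slot declared elsewhere in this header:

    #include <stdio.h>
    #include "jmm.h"

    // Sketch only: query the capability bit, then fetch the allocated bytes
    // for a single thread id. Error handling is trimmed for brevity.
    void print_allocated(JNIEnv* env, jlong tid) {
      JmmInterface* jmm = (JmmInterface*) JVM_GetManagement(JMM_VERSION);
      jmmOptionalSupport support;
      if (jmm == NULL || jmm->GetOptionalSupport(env, &support) != 0) return;
      if (!support.isThreadAllocatedMemorySupported) return;

      jlong not_found = -1;                 // "dead thread" marker (see below)
      jlongArray ids   = env->NewLongArray(1);
      jlongArray sizes = env->NewLongArray(1);
      env->SetLongArrayRegion(ids,   0, 1, &tid);
      env->SetLongArrayRegion(sizes, 0, 1, &not_found);

      jmm->GetThreadAllocatedMemory(env, ids, sizes);

      jlong bytes;
      env->GetLongArrayRegion(sizes, 0, 1, &bytes);
      printf("thread %lld allocated %lld bytes\n",
             (long long) tid, (long long) bytes);
    }

Pre-filling sizes with -1 matches the VM-side comment in management.cpp below:
the VM only overwrites entries for threads it can still find.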
--- a/src/share/vm/services/management.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/services/management.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,12 +101,14 @@
     _optional_support.isCurrentThreadCpuTimeSupported = 0;
     _optional_support.isOtherThreadCpuTimeSupported = 0;
   }
+
   _optional_support.isBootClassPathSupported = 1;
   _optional_support.isObjectMonitorUsageSupported = 1;
 #ifndef SERVICES_KERNEL
   // This depends on the heap inspector
   _optional_support.isSynchronizerUsageSupported = 1;
 #endif // SERVICES_KERNEL
+  _optional_support.isThreadAllocatedMemorySupported = 1;
 }
 
 void Management::initialize(TRAPS) {
@@ -386,11 +388,6 @@
 
 static void validate_thread_id_array(typeArrayHandle ids_ah, TRAPS) {
   int num_threads = ids_ah->length();
-  // should be non-empty array
-  if (num_threads == 0) {
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-              "Empty array of thread IDs");
-  }
 
   // Validate input thread IDs
   int i = 0;
@@ -402,11 +399,9 @@
                 "Invalid thread ID entry");
     }
   }
-
 }
 
 static void validate_thread_info_array(objArrayHandle infoArray_h, TRAPS) {
-
   // check if the element of infoArray is of type ThreadInfo class
   klassOop threadinfo_klass = Management::java_lang_management_ThreadInfo_klass(CHECK);
   klassOop element_klass = objArrayKlass::cast(infoArray_h->klass())->element_klass();
@@ -414,7 +409,6 @@
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "infoArray element type is not ThreadInfo class");
   }
-
 }
 
 
@@ -770,6 +764,45 @@
   return prev;
 JVM_END
 
+// Gets an array containing the amount of memory allocated on the Java
+// heap for a set of threads (in bytes).  Each element of the array is
+// the amount of memory allocated for the thread ID specified in the
+// corresponding entry in the given array of thread IDs; or -1 if the
+// thread does not exist or has terminated.
+JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids,
+                                             jlongArray sizeArray))
+  // Check if threads is null
+  if (ids == NULL || sizeArray == NULL) {
+    THROW(vmSymbols::java_lang_NullPointerException());
+  }
+
+  ResourceMark rm(THREAD);
+  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
+  typeArrayHandle ids_ah(THREAD, ta);
+
+  typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray));
+  typeArrayHandle sizeArray_h(THREAD, sa);
+
+  // validate the thread id array
+  validate_thread_id_array(ids_ah, CHECK);
+
+  // sizeArray must be of the same length as the given array of thread IDs
+  int num_threads = ids_ah->length();
+  if (num_threads != sizeArray_h->length()) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "The length of the given long array does not match the length of "
+              "the given array of thread IDs");
+  }
+
+  MutexLockerEx ml(Threads_lock);
+  for (int i = 0; i < num_threads; i++) {
+    JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i));
+    if (java_thread != NULL) {
+      sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes());
+    }
+  }
+JVM_END
+
 // Returns a java/lang/management/MemoryUsage object representing
 // the memory usage for the heap or non-heap memory.
 JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
@@ -834,6 +867,8 @@
     return ThreadService::is_thread_monitoring_contention();
   case JMM_THREAD_CPU_TIME:
     return ThreadService::is_thread_cpu_time_enabled();
+  case JMM_THREAD_ALLOCATED_MEMORY:
+    return ThreadService::is_thread_allocated_memory_enabled();
   default:
     assert(0, "Unrecognized attribute");
     return false;
@@ -851,6 +886,8 @@
     return ThreadService::set_thread_monitoring_contention(flag != 0);
   case JMM_THREAD_CPU_TIME:
     return ThreadService::set_thread_cpu_time_enabled(flag != 0);
+  case JMM_THREAD_ALLOCATED_MEMORY:
+    return ThreadService::set_thread_allocated_memory_enabled(flag != 0);
   default:
     assert(0, "Unrecognized attribute");
     return false;
@@ -1096,6 +1133,7 @@
 //               maxDepth == 0  requests no stack trace.
 //   infoArray - array of ThreadInfo objects
 //
+// QQQ - Why does this method return a value instead of void?
 JVM_ENTRY(jint, jmm_GetThreadInfo(JNIEnv *env, jlongArray ids, jint maxDepth, jobjectArray infoArray))
   // Check if threads is null
   if (ids == NULL || infoArray == NULL) {
@@ -1159,7 +1197,6 @@
     }
   } else {
     // obtain thread dump with the specific list of threads with stack trace
-
     do_thread_dump(&dump_result,
                    ids_ah,
                    num_threads,
@@ -1252,8 +1289,6 @@
       continue;
     }
 
-
-
     ThreadStackTrace* stacktrace = ts->get_stack_trace();
     assert(stacktrace != NULL, "Must have a stack trace dumped");
 
@@ -1500,6 +1535,49 @@
   return -1;
 JVM_END
 
+// Gets an array containing the CPU times consumed by a set of threads
+// (in nanoseconds).  Each element of the array is the CPU time for the
+// thread ID specified in the corresponding entry in the given array
+// of thread IDs; or -1 if the thread does not exist or has terminated.
+// If user_sys_cpu_time = true, the sum of user level and system CPU time
+// for the given thread is returned; otherwise, only user level CPU time
+// is returned.
+JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids,
+                                              jlongArray timeArray,
+                                              jboolean user_sys_cpu_time))
+  // Check if threads is null
+  if (ids == NULL || timeArray == NULL) {
+    THROW(vmSymbols::java_lang_NullPointerException());
+  }
+
+  ResourceMark rm(THREAD);
+  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
+  typeArrayHandle ids_ah(THREAD, ta);
+
+  typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray));
+  typeArrayHandle timeArray_h(THREAD, tia);
+
+  // validate the thread id array
+  validate_thread_id_array(ids_ah, CHECK);
+
+  // timeArray must be of the same length as the given array of thread IDs
+  int num_threads = ids_ah->length();
+  if (num_threads != timeArray_h->length()) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "The length of the given long array does not match the length of "
+              "the given array of thread IDs");
+  }
+
+  MutexLockerEx ml(Threads_lock);
+  for (int i = 0; i < num_threads; i++) {
+    JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i));
+    if (java_thread != NULL) {
+      timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread,
+                                                      user_sys_cpu_time != 0));
+    }
+  }
+JVM_END
+
 // Returns a String array of all VM global flag names
 JVM_ENTRY(jobjectArray, jmm_GetVMGlobalNames(JNIEnv *env))
   // last flag entry is always NULL, so subtract 1
@@ -2020,7 +2098,7 @@
   jmm_GetMemoryManagers,
   jmm_GetMemoryPoolUsage,
   jmm_GetPeakMemoryPoolUsage,
-  NULL,
+  jmm_GetThreadAllocatedMemory,
   jmm_GetMemoryUsage,
   jmm_GetLongAttribute,
   jmm_GetBoolAttribute,
@@ -2038,7 +2116,7 @@
   jmm_GetGCExtAttributeInfo,
   jmm_GetLastGCStat,
   jmm_GetThreadCpuTimeWithKind,
-  NULL,
+  jmm_GetThreadCpuTimesWithKind,
   jmm_DumpHeap0,
   jmm_FindDeadlockedThreads,
   jmm_SetVMGlobal,
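
Note that the function-table hunks above retire the reserved4 and reserved5
placeholders rather than appending new members: filling a previously reserved
slot keeps every existing function pointer at the same offset, so binaries
compiled against the older table still line up. A toy illustration of the
pattern, with hypothetical names:

    // v1 ships a placeholder; v2 fills it in without moving anything else,
    // so code compiled against v1 still finds get_version and shutdown at
    // the same offsets.
    struct api_v1 {
      int   (*get_version)();
      void*  reserved0;                 // placeholder keeps the layout stable
      void  (*shutdown)();
    };

    struct api_v2 {
      int   (*get_version)();
      long  (*get_allocated)(int tid);  // new entry point in the old slot
      void  (*shutdown)();              // unchanged offset
    };

A client is expected to check the interface version before touching a slot that
was reserved in an earlier revision.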
--- a/src/share/vm/services/threadService.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/services/threadService.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,7 @@
 // Default is disabled.
 bool ThreadService::_thread_monitoring_contention_enabled = false;
 bool ThreadService::_thread_cpu_time_enabled = false;
+bool ThreadService::_thread_allocated_memory_enabled = false;
 
 PerfCounter*  ThreadService::_total_threads_count = NULL;
 PerfVariable* ThreadService::_live_threads_count = NULL;
@@ -84,6 +85,8 @@
   if (os::is_thread_cpu_time_supported()) {
     _thread_cpu_time_enabled = true;
   }
+
+  _thread_allocated_memory_enabled = true; // Always on, so enable it
 }
 
 void ThreadService::reset_peak_thread_count() {
@@ -181,6 +184,15 @@
   return prev;
 }
 
+bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
+  MutexLocker m(Management_lock);
+
+  bool prev = _thread_allocated_memory_enabled;
+  _thread_allocated_memory_enabled = flag;
+
+  return prev;
+}
+
 // GC support
 void ThreadService::oops_do(OopClosure* f) {
   for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
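
The new set_thread_allocated_memory_enabled() above follows the same shape as
the existing cpu-time setter: take Management_lock, install the new value, and
hand back the previous one so a caller can restore it later. A standalone
sketch of the idiom, with std::mutex standing in for the VM's Management_lock:

    #include <mutex>

    static std::mutex lock;
    static bool enabled = false;

    // Set the flag under the lock and return the previous value, so the
    // caller can undo the change symmetrically.
    bool set_enabled(bool flag) {
      std::lock_guard<std::mutex> guard(lock);
      bool prev = enabled;
      enabled = flag;
      return prev;
    }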
--- a/src/share/vm/services/threadService.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/services/threadService.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,6 +65,7 @@
 
   static bool          _thread_monitoring_contention_enabled;
   static bool          _thread_cpu_time_enabled;
+  static bool          _thread_allocated_memory_enabled;
 
   // Need to keep the list of thread dump result that
   // keep references to methodOop since thread dump can be
@@ -83,6 +84,9 @@
   static bool set_thread_cpu_time_enabled(bool flag);
   static bool is_thread_cpu_time_enabled()    { return _thread_cpu_time_enabled; }
 
+  static bool set_thread_allocated_memory_enabled(bool flag);
+  static bool is_thread_allocated_memory_enabled() { return _thread_allocated_memory_enabled; }
+
   static jlong get_total_thread_count()       { return _total_threads_count->get_value(); }
   static jlong get_peak_thread_count()        { return _peak_threads_count->get_value(); }
   static jlong get_live_thread_count()        { return _live_threads_count->get_value() - _exiting_threads_count; }
--- a/src/share/vm/utilities/debug.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/utilities/debug.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -226,7 +226,7 @@
 
 void report_vm_out_of_memory(const char* file, int line, size_t size,
                              const char* message) {
-  if (Debugging || error_is_suppressed(file, line)) return;
+  if (Debugging) return;
 
   // We try to gather additional information for the first out of memory
   // error only; gathering additional data might cause an allocation and a
--- a/src/share/vm/utilities/debug.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/utilities/debug.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -34,6 +34,7 @@
 class FormatBuffer {
 public:
   inline FormatBuffer(const char * format, ...);
+  inline void append(const char* format, ...);
   operator const char *() const { return _buf; }
 
 private:
@@ -51,6 +52,19 @@
   va_end(argp);
 }
 
+template <size_t bufsz>
+void FormatBuffer<bufsz>::append(const char* format, ...) {
+  // Given that the constructor does a vsnprintf we can assume that
+  // _buf is already initialized.
+  size_t len = strlen(_buf);
+  char* buf_end = _buf + len;
+
+  va_list argp;
+  va_start(argp, format);
+  vsnprintf(buf_end, bufsz - len, format, argp);
+  va_end(argp);
+}
+
 // Used to format messages for assert(), guarantee(), fatal(), etc.
 typedef FormatBuffer<> err_msg;
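
FormatBuffer::append() makes err_msg messages incrementally buildable: it finds
the current end of _buf and vsnprintf's into the remaining bufsz - len bytes,
so repeated appends cannot overrun the fixed-size buffer (later output is
silently truncated instead). A hypothetical call site, with made-up values;
err_msg and guarantee() come from this header family:

    #include "utilities/debug.hpp"

    void check_bounds(int idx, int len) {
      err_msg msg("index %d", idx);
      msg.append(" out of bounds for length %d", len);
      guarantee(idx >= 0 && idx < len, msg);  // err_msg converts to const char*
    }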
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/decoder.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "prims/jvm.h"
+#include "utilities/decoder.hpp"
+
+Decoder::decoder_status  Decoder::_decoder_status = Decoder::no_error;
+bool                     Decoder::_initialized = false;
+
+#ifndef _WINDOWS
+
+// Implementation of common functionalities among Solaris and Linux
+#include "utilities/elfFile.hpp"
+
+ElfFile* Decoder::_opened_elf_files = NULL;
+
+bool Decoder::can_decode_C_frame_in_vm() {
+  return true;
+}
+
+void Decoder::initialize() {
+  _initialized = true;
+}
+
+void Decoder::uninitialize() {
+  if (_opened_elf_files != NULL) {
+    delete _opened_elf_files;
+    _opened_elf_files = NULL;
+  }
+  _initialized = false;
+}
+
+Decoder::decoder_status Decoder::decode(address addr, const char* filepath, char *buf, int buflen, int *offset) {
+  if (_decoder_status != no_error) {
+    return _decoder_status;
+  }
+
+  ElfFile* file = get_elf_file(filepath);
+  if (_decoder_status != no_error) {
+    return _decoder_status;
+  }
+
+  const char* symbol = file->decode(addr, offset);
+  if (file->get_status() == out_of_memory) {
+    _decoder_status = out_of_memory;
+    return _decoder_status;
+  } else if (symbol != NULL) {
+    if (!demangle(symbol, buf, buflen)) {
+      jio_snprintf(buf, buflen, "%s", symbol);
+    }
+    return no_error;
+  } else {
+    return symbol_not_found;
+  }
+}
+
+ElfFile* Decoder::get_elf_file(const char* filepath) {
+  if (_decoder_status != no_error) {
+    return NULL;
+  }
+  ElfFile* file = _opened_elf_files;
+  while (file != NULL) {
+    if (file->same_elf_file(filepath)) {
+      return file;
+    }
+    file = file->m_next;
+  }
+
+  file = new ElfFile(filepath);
+  if (file == NULL) {
+    _decoder_status = out_of_memory;
+    return NULL;
+  }
+  if (_opened_elf_files != NULL) {
+    file->m_next = _opened_elf_files;
+  }
+
+  _opened_elf_files = file;
+  return file;
+}
+
+#endif
+
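
On this non-Windows path, decode() lazily opens one ElfFile per library path
and caches it on the _opened_elf_files list, falling back to the raw symbol
name whenever demangling fails. A sketch of a hypothetical caller; the library
path is an example value only:

    #include "utilities/decoder.hpp"
    #include "utilities/ostream.hpp"

    void print_native_symbol(outputStream* st, address pc) {
      char symbol[256];
      int  offset = 0;
      if (Decoder::decode(pc, "/usr/java/lib/libjvm.so", symbol,
                          (int) sizeof(symbol), &offset) == Decoder::no_error) {
        st->print("%s+0x%x", symbol, offset);  // e.g. "JavaThread::run()+0x1c4"
      }
    }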
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/decoder.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#ifndef __DECODER_HPP
+#define __DECODER_HPP
+
+#include "memory/allocation.hpp"
+
+#ifdef _WINDOWS
+#include <windows.h>
+#include <imagehlp.h>
+
+// functions needed for decoding symbols
+typedef DWORD (WINAPI *pfn_SymSetOptions)(DWORD);
+typedef BOOL  (WINAPI *pfn_SymInitialize)(HANDLE, PCTSTR, BOOL);
+typedef BOOL  (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
+typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);
+
+#else
+
+class ElfFile;
+
+#endif // _WINDOWS
+
+
+class Decoder: public StackObj {
+
+ public:
+  // status code for decoding native C frame
+  enum decoder_status {
+         no_error,             // successfully decoded frames
+         out_of_memory,        // out of memory
+         file_invalid,         // invalid elf file
+         file_not_found,       // could not find symbol file (on Windows), such as jvm.pdb or jvm.map
+         helper_not_found,     // could not load dbghelp.dll (Windows only)
+         helper_func_error,    // decoding functions not found (Windows only)
+         helper_init_error,    // SymInitialize failed (Windows only)
+         symbol_not_found      // could not find the symbol
+  };
+
+ public:
+  Decoder() { initialize(); };
+  ~Decoder() { uninitialize(); };
+
+  static bool can_decode_C_frame_in_vm();
+
+  static void initialize();
+  static void uninitialize();
+
+#ifdef _WINDOWS
+  static decoder_status    decode(address addr, char *buf, int buflen, int *offset);
+#else
+  static decoder_status    decode(address addr, const char* filepath, char *buf, int buflen, int *offset);
+#endif
+
+  static bool              demangle(const char* symbol, char *buf, int buflen);
+
+  static decoder_status    get_status() { return _decoder_status; };
+
+#ifndef _WINDOWS
+ private:
+  static ElfFile*         get_elf_file(const char* filepath);
+#endif // _WINDOWS
+
+
+ private:
+  static decoder_status     _decoder_status;
+  static bool               _initialized;
+
+#ifdef _WINDOWS
+  static HMODULE                   _dbghelp_handle;
+  static bool                      _can_decode_in_vm;
+  static pfn_SymGetSymFromAddr64   _pfnSymGetSymFromAddr64;
+  static pfn_UndecorateSymbolName  _pfnUndecorateSymbolName;
+#else
+  static ElfFile*                  _opened_elf_files;
+#endif // _WINDOWS
+};
+
+#endif // __DECODER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/elfFile.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#ifndef _WINDOWS
+
+#include <string.h>
+#include <stdio.h>
+#include <limits.h>
+
+#include "memory/allocation.inline.hpp"
+#include "utilities/decoder.hpp"
+#include "utilities/elfFile.hpp"
+#include "utilities/elfStringTable.hpp"
+#include "utilities/elfSymbolTable.hpp"
+
+
+ElfFile::ElfFile(const char* filepath) {
+  assert(filepath, "null file path");
+  memset(&m_elfHdr, 0, sizeof(m_elfHdr));
+  m_string_tables = NULL;
+  m_symbol_tables = NULL;
+  m_next = NULL;
+  m_status = Decoder::no_error;
+
+  int len = strlen(filepath) + 1;
+  m_filepath = NEW_C_HEAP_ARRAY(char, len);
+  if (m_filepath != NULL) {
+    strcpy((char*)m_filepath, filepath);
+    m_file = fopen(filepath, "r");
+    if (m_file != NULL) {
+      load_tables();
+    } else {
+      m_status = Decoder::file_not_found;
+    }
+  } else {
+    m_status = Decoder::out_of_memory;
+  }
+}
+
+ElfFile::~ElfFile() {
+  if (m_string_tables != NULL) {
+    delete m_string_tables;
+  }
+
+  if (m_symbol_tables != NULL) {
+    delete m_symbol_tables;
+  }
+
+  if (m_file != NULL) {
+    fclose(m_file);
+  }
+
+  if (m_filepath != NULL) {
+    FREE_C_HEAP_ARRAY(char, m_filepath);
+  }
+
+  if (m_next != NULL) {
+    delete m_next;
+  }
+};
+
+
+// Check the elf header to ensure the file is valid.
+bool ElfFile::is_elf_file(Elf_Ehdr& hdr) {
+  return (ELFMAG0 == hdr.e_ident[EI_MAG0] &&
+      ELFMAG1 == hdr.e_ident[EI_MAG1] &&
+      ELFMAG2 == hdr.e_ident[EI_MAG2] &&
+      ELFMAG3 == hdr.e_ident[EI_MAG3] &&
+      ELFCLASSNONE != hdr.e_ident[EI_CLASS] &&
+      ELFDATANONE != hdr.e_ident[EI_DATA]);
+}
+
+bool ElfFile::load_tables() {
+  assert(m_file, "file not open");
+  assert(m_status == Decoder::no_error, "already in error");
+
+  // read elf file header
+  if (fread(&m_elfHdr, sizeof(m_elfHdr), 1, m_file) != 1) {
+    m_status = Decoder::file_invalid;
+    return false;
+  }
+
+  if (!is_elf_file(m_elfHdr)) {
+    m_status = Decoder::file_invalid;
+    return false;
+  }
+
+  // walk elf file's section headers, and load string tables
+  Elf_Shdr shdr;
+  if (!fseek(m_file, m_elfHdr.e_shoff, SEEK_SET)) {
+    if (m_status != Decoder::no_error) return false;
+
+    for (int index = 0; index < m_elfHdr.e_shnum; index ++) {
+      if (fread((void*)&shdr, sizeof(Elf_Shdr), 1, m_file) != 1) {
+        m_status = Decoder::file_invalid;
+        return false;
+      }
+      // string table
+      if (shdr.sh_type == SHT_STRTAB) {
+        ElfStringTable* table = new ElfStringTable(m_file, shdr, index);
+        if (table == NULL) {
+          m_status = Decoder::out_of_memory;
+          return false;
+        }
+        add_string_table(table);
+      } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) {
+        ElfSymbolTable* table = new ElfSymbolTable(m_file, shdr);
+        if (table == NULL) {
+          m_status = Decoder::out_of_memory;
+          return false;
+        }
+        add_symbol_table(table);
+      }
+    }
+  }
+  return true;
+}
+
+const char* ElfFile::decode(address addr, int* offset) {
+  // something already went wrong, just give up
+  if (m_status != Decoder::no_error) {
+    return NULL;
+  }
+
+  ElfSymbolTable* symbol_table = m_symbol_tables;
+  int string_table_index;
+  int pos_in_string_table;
+  int off = INT_MAX;
+  bool found_symbol = false;
+  while (symbol_table != NULL) {
+    if (Decoder::no_error == symbol_table->lookup(addr, &string_table_index, &pos_in_string_table, &off)) {
+      found_symbol = true;
+    }
+    symbol_table = symbol_table->m_next;
+  }
+  if (!found_symbol) return NULL;
+
+  ElfStringTable* string_table = get_string_table(string_table_index);
+  if (string_table == NULL) {
+    m_status = Decoder::file_invalid;
+    return NULL;
+  }
+  if (offset) *offset = off;
+  return string_table->string_at(pos_in_string_table);
+}
+
+
+void ElfFile::add_symbol_table(ElfSymbolTable* table) {
+  if (m_symbol_tables == NULL) {
+    m_symbol_tables = table;
+  } else {
+    table->m_next = m_symbol_tables;
+    m_symbol_tables = table;
+  }
+}
+
+void ElfFile::add_string_table(ElfStringTable* table) {
+  if (m_string_tables == NULL) {
+    m_string_tables = table;
+  } else {
+    table->m_next = m_string_tables;
+    m_string_tables = table;
+  }
+}
+
+ElfStringTable* ElfFile::get_string_table(int index) {
+  ElfStringTable* p = m_string_tables;
+  while (p != NULL) {
+    if (p->index() == index) return p;
+    p = p->m_next;
+  }
+  return NULL;
+}
+
+#endif // _WINDOWS
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/elfFile.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef __ELF_FILE_HPP
+#define __ELF_FILE_HPP
+
+#ifndef _WINDOWS
+
+#include <elf.h>
+#include <stdio.h>
+
+#ifdef _LP64
+
+typedef Elf64_Half      Elf_Half;
+typedef Elf64_Word      Elf_Word;
+typedef Elf64_Off       Elf_Off;
+typedef Elf64_Addr      Elf_Addr;
+
+typedef Elf64_Ehdr      Elf_Ehdr;
+typedef Elf64_Shdr      Elf_Shdr;
+typedef Elf64_Sym       Elf_Sym;
+
+#define ELF_ST_TYPE ELF64_ST_TYPE
+
+#else
+
+typedef Elf32_Half      Elf_Half;
+typedef Elf32_Word      Elf_Word;
+typedef Elf32_Off       Elf_Off;
+typedef Elf32_Addr      Elf_Addr;
+
+
+typedef Elf32_Ehdr      Elf_Ehdr;
+typedef Elf32_Shdr      Elf_Shdr;
+typedef Elf32_Sym       Elf_Sym;
+
+#define ELF_ST_TYPE ELF32_ST_TYPE
+#endif
+
+#include "globalDefinitions.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/decoder.hpp"
+
+
+class ElfStringTable;
+class ElfSymbolTable;
+
+
+// On Solaris/Linux platforms, libjvm.so contains all private symbols.
+// ElfFile is basically an elf file parser, which can look up the symbol
+// nearest to a given address.
+// Beware: this code is called from the VM error reporting code when the VM
+// is already in an "error" state, so there are scenarios where the lookup
+// will fail. We want to be very defensive here and bail out if anything goes wrong.
+
+class ElfFile: public CHeapObj {
+  friend class Decoder;
+ public:
+  ElfFile(const char* filepath);
+  ~ElfFile();
+
+  const char* decode(address addr, int* offset);
+  const char* filepath() {
+    return m_filepath;
+  }
+
+  bool same_elf_file(const char* filepath) {
+    assert(filepath, "null file path");
+    assert(m_filepath, "already out of memory");
+    return (m_filepath && !strcmp(filepath, m_filepath));
+  }
+
+  Decoder::decoder_status get_status() {
+    return m_status;
+  }
+
+ private:
+  // sanity check, if the file is a real elf file
+  bool is_elf_file(Elf_Ehdr&);
+
+  // load string tables from the elf file
+  bool load_tables();
+
+  // string tables are stored in a linked list
+  void add_string_table(ElfStringTable* table);
+
+  // symbol tables are stored in a linked list
+  void add_symbol_table(ElfSymbolTable* table);
+
+  // return a string table at specified section index
+  ElfStringTable* get_string_table(int index);
+
+  // look up an address and return the nearest symbol
+  const char* look_up(Elf_Shdr shdr, address addr, int* offset);
+
+ protected:
+  ElfFile*         m_next;
+
+ private:
+  // file
+  const char* m_filepath;
+  FILE* m_file;
+
+  // Elf header
+  Elf_Ehdr            m_elfHdr;
+
+  // symbol tables
+  ElfSymbolTable*     m_symbol_tables;
+
+  // string tables
+  ElfStringTable*     m_string_tables;
+
+  Decoder::decoder_status  m_status;
+};
+
+#endif // _WINDOWS
+
+#endif // __ELF_FILE_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/elfStringTable.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#ifndef _WINDOWS
+
+#include "memory/allocation.inline.hpp"
+#include "utilities/elfStringTable.hpp"
+
+// We will try to load the whole string table into memory if we can.
+// Otherwise, fall back to more expensive file operations.
+ElfStringTable::ElfStringTable(FILE* file, Elf_Shdr shdr, int index) {
+  assert(file, "null file handle");
+  m_table = NULL;
+  m_index = index;
+  m_next = NULL;
+  m_file = file;
+  m_status = Decoder::no_error;
+
+  // try to load the string table
+  long cur_offset = ftell(file);
+  m_table = (char*)NEW_C_HEAP_ARRAY(char, shdr.sh_size);
+  if (m_table != NULL) {
+    // if there is an error, mark the error
+    if (fseek(file, shdr.sh_offset, SEEK_SET) ||
+      fread((void*)m_table, shdr.sh_size, 1, file) != 1 ||
+      fseek(file, cur_offset, SEEK_SET)) {
+      m_status = Decoder::file_invalid;
+      FREE_C_HEAP_ARRAY(char, m_table);
+      m_table = NULL;
+    }
+  } else {
+    memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr));
+  }
+}
+
+ElfStringTable::~ElfStringTable() {
+  if (m_table != NULL) {
+    FREE_C_HEAP_ARRAY(char, m_table);
+  }
+
+  if (m_next != NULL) {
+    delete m_next;
+  }
+}
+
+const char* ElfStringTable::string_at(int pos) {
+  if (m_status != Decoder::no_error) {
+    return NULL;
+  }
+  if (m_table != NULL) {
+    return (const char*)(m_table + pos);
+  } else {
+    long cur_pos = ftell(m_file);
+    if (cur_pos == -1 ||
+      fseek(m_file, m_shdr.sh_offset + pos, SEEK_SET) ||
+      fread(m_symbol, 1, MAX_SYMBOL_LEN, m_file) <= 0 ||
+      fseek(m_file, cur_pos, SEEK_SET)) {
+      m_status = Decoder::file_invalid;
+      return NULL;
+    }
+    return (const char*)m_symbol;
+  }
+}
+
+#endif // _WINDOWS
+
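
Whichever path string_at() takes, the offset semantics are the same: an ELF
string table section is one blob of NUL-terminated strings, and the st_name
field of a symbol entry indexes directly into that blob. A standalone
illustration, independent of the VM sources:

    #include <cstdio>

    int main() {
      const char blob[] = "\0main\0JavaThread::run()\0";
      int st_name = 6;                       // offset stored in a symbol entry
      std::printf("%s\n", blob + st_name);   // prints: JavaThread::run()
      return 0;
    }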
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/elfStringTable.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef __ELF_STRING_TABLE_HPP
+#define __ELF_STRING_TABLE_HPP
+
+#ifndef _WINDOWS
+
+#include "memory/allocation.hpp"
+#include "utilities/decoder.hpp"
+#include "utilities/elfFile.hpp"
+
+
+// The string table represents a string table section in an elf file.
+// Whenever there is enough memory, it will load the whole string table
+// as one blob. Otherwise, it will read strings from the file on request.
+
+#define MAX_SYMBOL_LEN  256
+
+class ElfStringTable: CHeapObj {
+  friend class ElfFile;
+ public:
+  ElfStringTable(FILE* file, Elf_Shdr shdr, int index);
+  ~ElfStringTable();
+
+  // section index
+  int index() { return m_index; };
+
+  // get string at specified offset
+  const char* string_at(int offset);
+
+  // get status code
+  Decoder::decoder_status get_status() { return m_status; };
+
+ protected:
+  ElfStringTable*        m_next;
+
+  // section index
+  int                      m_index;
+
+  // holds the complete string table if enough
+  // memory could be allocated
+  const char*              m_table;
+
+  // file containing the string table
+  FILE*                    m_file;
+
+  // section header
+  Elf_Shdr                 m_shdr;
+
+  // buffer for reading individual string
+  char                     m_symbol[MAX_SYMBOL_LEN];
+
+  // error code
+  Decoder::decoder_status  m_status;
+};
+
+#endif // _WINDOWS
+
+#endif // __ELF_STRING_TABLE_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/elfSymbolTable.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#ifndef _WINDOWS
+
+#include "memory/allocation.inline.hpp"
+#include "utilities/elfSymbolTable.hpp"
+
+ElfSymbolTable::ElfSymbolTable(FILE* file, Elf_Shdr shdr) {
+  assert(file, "null file handle");
+  m_symbols = NULL;
+  m_next = NULL;
+  m_file = file;
+  m_status = Decoder::no_error;
+
+  // try to load the string table
+  long cur_offset = ftell(file);
+  if (cur_offset != -1) {
+    m_symbols = (Elf_Sym*)NEW_C_HEAP_ARRAY(char, shdr.sh_size);
+    if (m_symbols) {
+      if (fseek(file, shdr.sh_offset, SEEK_SET) ||
+        fread((void*)m_symbols, shdr.sh_size, 1, file) != 1 ||
+        fseek(file, cur_offset, SEEK_SET)) {
+        m_status = Decoder::file_invalid;
+        FREE_C_HEAP_ARRAY(char, m_symbols);
+        m_symbols = NULL;
+      }
+    }
+    if (m_status == Decoder::no_error) {
+      memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr));
+    }
+  } else {
+    m_status = Decoder::file_invalid;
+  }
+}
+
+ElfSymbolTable::~ElfSymbolTable() {
+  if (m_symbols != NULL) {
+    FREE_C_HEAP_ARRAY(char, m_symbols);
+  }
+
+  if (m_next != NULL) {
+    delete m_next;
+  }
+}
+
+Decoder::decoder_status ElfSymbolTable::lookup(address addr, int* stringtableIndex, int* posIndex, int* offset) {
+  assert(stringtableIndex, "null string table index pointer");
+  assert(posIndex, "null string table offset pointer");
+  assert(offset, "null offset pointer");
+
+  if (m_status != Decoder::no_error) {
+    return m_status;
+  }
+
+  address pc = 0;
+  size_t  sym_size = sizeof(Elf_Sym);
+  assert((m_shdr.sh_size % sym_size) == 0, "check size");
+  int count = m_shdr.sh_size / sym_size;
+  if (m_symbols != NULL) {
+    for (int index = 0; index < count; index ++) {
+      if (STT_FUNC == ELF_ST_TYPE(m_symbols[index].st_info)) {
+        address sym_addr = (address)m_symbols[index].st_value;
+        if (sym_addr < addr && (addr - sym_addr) < *offset) {
+          pc = (address)m_symbols[index].st_value;
+          *offset = (int)(addr - pc);
+          *posIndex = m_symbols[index].st_name;
+          *stringtableIndex = m_shdr.sh_link;
+        }
+      }
+    }
+  } else {
+    long cur_pos;
+    if ((cur_pos = ftell(m_file)) == -1 ||
+      fseek(m_file, m_shdr.sh_offset, SEEK_SET)) {
+      m_status = Decoder::file_invalid;
+      return m_status;
+    }
+
+    Elf_Sym sym;
+    for (int index = 0; index < count; index ++) {
+      if (fread(&sym, sym_size, 1, m_file) == 1) {
+        if (STT_FUNC == ELF_ST_TYPE(sym.st_info)) {
+          address sym_addr = (address)sym.st_value;
+          if (sym_addr < addr && (addr - sym_addr) < *offset) {
+            pc = (address)sym.st_value;
+            *offset = (int)(addr - pc);
+            *posIndex = sym.st_name;
+            *stringtableIndex = m_shdr.sh_link;
+          }
+        }
+      } else {
+        m_status = Decoder::file_invalid;
+        return m_status;
+      }
+    }
+    fseek(m_file, cur_pos, SEEK_SET);
+  }
+  return m_status;
+}
+
+#endif // _WINDOWS
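
Both branches of lookup() above run the same scan: among STT_FUNC symbols whose
address lies below the target, keep the one with the smallest positive
distance, threading *offset through as the running minimum (the caller seeds it
with INT_MAX). A standalone illustration of that scan:

    #include <cstdio>

    struct Sym { const char* name; unsigned long addr; };

    int main() {
      Sym syms[] = { {"f", 0x1000}, {"g", 0x1400}, {"h", 0x2000} };
      unsigned long pc = 0x1432, best_off = ~0UL;
      const char* best = NULL;
      for (int i = 0; i < 3; i++) {
        if (syms[i].addr < pc && pc - syms[i].addr < best_off) {
          best_off = pc - syms[i].addr;
          best = syms[i].name;
        }
      }
      std::printf("%s+0x%lx\n", best, best_off);  // prints: g+0x32
      return 0;
    }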
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/elfSymbolTable.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef __ELF_SYMBOL_TABLE_HPP
+#define __ELF_SYMBOL_TABLE_HPP
+
+#ifndef _WINDOWS
+
+
+#include "memory/allocation.hpp"
+#include "utilities/decoder.hpp"
+#include "utilities/elfFile.hpp"
+
+/*
+ * A symbol table object represents a symbol table section in an elf file.
+ * Whenever possible, it will load all symbols from the corresponding section
+ * of the elf file into memory. Otherwise, it will walk the section in the
+ * file to look up the symbol nearest to a given address.
+ */
+class ElfSymbolTable: public CHeapObj {
+  friend class ElfFile;
+ public:
+  ElfSymbolTable(FILE* file, Elf_Shdr shdr);
+  ~ElfSymbolTable();
+
+  // search for the symbol that is nearest to the specified address.
+  Decoder::decoder_status lookup(address addr, int* stringtableIndex, int* posIndex, int* offset);
+
+  Decoder::decoder_status get_status() { return m_status; };
+
+ protected:
+  ElfSymbolTable*  m_next;
+
+  // holds a complete symbol table section if
+  // enough memory could be allocated
+  Elf_Sym*            m_symbols;
+
+  // file containing the symbol table
+  FILE*               m_file;
+
+  // section header
+  Elf_Shdr            m_shdr;
+
+  Decoder::decoder_status  m_status;
+};
+
+#endif // _WINDOWS
+
+#endif // __ELF_SYMBOL_TABLE_HPP
+
+
+
--- a/src/share/vm/utilities/globalDefinitions.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1179,6 +1179,8 @@
 // '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll"
 // (in ILP32).
 
+#define BOOL_TO_STR(__b) ((__b) ? "true" : "false")
+
 // Format 32-bit quantities.
 #define INT32_FORMAT  "%d"
 #define UINT32_FORMAT "%u"
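
The outer parentheses around the BOOL_TO_STR expansion matter as soon as the
macro meets a neighboring operator: without them, an expression such as
BOOL_TO_STR(a) == s expands so that == binds only to the "false" arm. A
self-contained check of the parenthesized form:

    #include <cstdio>

    #define BOOL_TO_STR(__b) ((__b) ? "true" : "false")

    int main() {
      int a = 1, b = 0;
      std::printf("a&&b=%s a||b=%s\n",
                  BOOL_TO_STR(a && b), BOOL_TO_STR(a || b));
      return 0;  // prints: a&&b=false a||b=true
    }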
--- a/src/share/vm/utilities/vmError.cpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/utilities/vmError.cpp	Fri Jan 21 13:03:13 2011 -0800
@@ -33,6 +33,7 @@
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/top.hpp"
 #include "utilities/vmError.hpp"
@@ -66,7 +67,7 @@
 // threads are blocked forever inside report_and_die().
 
 // Constructor for crashes
-VMError::VMError(Thread* thread, int sig, address pc, void* siginfo, void* context) {
+VMError::VMError(Thread* thread, unsigned int sig, address pc, void* siginfo, void* context) {
     _thread = thread;
     _id = sig;
     _pc   = pc;
@@ -321,29 +322,52 @@
 
   STEP(10, "(printing fatal error message)")
 
-     st->print_cr("#");
-     st->print_cr("# A fatal error has been detected by the Java Runtime Environment:");
+    st->print_cr("#");
+    if (should_report_bug(_id)) {
+      st->print_cr("# A fatal error has been detected by the Java Runtime Environment:");
+    } else {
+      st->print_cr("# There is insufficient memory for the Java "
+                   "Runtime Environment to continue.");
+    }
 
   STEP(15, "(printing type of error)")
 
      switch(_id) {
        case oom_error:
-         st->print_cr("#");
-         st->print("# java.lang.OutOfMemoryError: ");
          if (_size) {
-           st->print("requested ");
-           sprintf(buf,SIZE_FORMAT,_size);
+           st->print("# Native memory allocation (malloc) failed to allocate ");
+           jio_snprintf(buf, sizeof(buf), SIZE_FORMAT, _size);
            st->print(buf);
            st->print(" bytes");
            if (_message != NULL) {
              st->print(" for ");
              st->print(_message);
            }
-           st->print_cr(". Out of swap space?");
+           st->cr();
          } else {
-           if (_message != NULL)
+           if (_message != NULL) {
+             st->print("# ");
             st->print_cr(_message);
+           }
         }
+         // In error file give some solutions
+         if (_verbose) {
+           st->print_cr("# Possible reasons:");
+           st->print_cr("#   The system is out of physical RAM or swap space");
+           st->print_cr("#   In 32 bit mode, the process size limit was hit");
+           st->print_cr("# Possible solutions:");
+           st->print_cr("#   Reduce memory load on the system");
+           st->print_cr("#   Increase physical memory or swap space");
+           st->print_cr("#   Check if swap backing store is full");
+           st->print_cr("#   Use 64 bit Java on a 64 bit OS");
+           st->print_cr("#   Decrease Java heap size (-Xmx/-Xms)");
+           st->print_cr("#   Decrease number of Java threads");
+           st->print_cr("#   Decrease Java thread stack sizes (-Xss)");
+           st->print_cr("#   Set larger code cache with -XX:ReservedCodeCacheSize=");
+           st->print_cr("# This output file may be truncated or incomplete.");
+         } else {
+           return;  // that's enough for the screen
+         }
          break;
        case internal_error:
        default:
@@ -360,7 +383,11 @@
        st->print(" (0x%x)", _id);                // signal number
        st->print(" at pc=" PTR_FORMAT, _pc);
      } else {
-       st->print("Internal Error");
+       if (should_report_bug(_id)) {
+         st->print("Internal Error");
+       } else {
+         st->print("Out of Memory Error");
+       }
        if (_filename != NULL && _lineno > 0) {
 #ifdef PRODUCT
          // In product mode chop off pathname?
@@ -392,12 +419,14 @@
 
   STEP(40, "(printing error message)")
 
-     // error message
-     if (_detail_msg) {
-       st->print_cr("#  %s: %s", _message ? _message : "Error", _detail_msg);
-     } else if (_message) {
-       st->print_cr("#  Error: %s", _message);
-     }
+     if (should_report_bug(_id)) {  // already printed the message.
+       // error message
+       if (_detail_msg) {
+         st->print_cr("#  %s: %s", _message ? _message : "Error", _detail_msg);
+       } else if (_message) {
+         st->print_cr("#  Error: %s", _message);
+       }
+    }
 
   STEP(50, "(printing Java version string)")
 
@@ -427,7 +456,9 @@
 
   STEP(65, "(printing bug submit message)")
 
-     if (_verbose) print_bug_submit_message(st, _thread);
+     if (should_report_bug(_id) && _verbose) {
+       print_bug_submit_message(st, _thread);
+     }
 
   STEP(70, "(printing thread)" )
 
@@ -516,8 +547,10 @@
        if (fr.pc()) {
           st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
 
+          // initialize decoder to decode C frames
+          Decoder decoder;
+
           int count = 0;
-
           while (count++ < StackPrintLimit) {
              fr.print_on_error(st, buf, sizeof(buf));
              st->cr();
@@ -841,11 +874,13 @@
       }
 
       if (fd == -1) {
-        // try temp directory
         const char * tmpdir = os::get_temp_directory();
-        jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log",
-                     tmpdir, os::file_separator(), os::current_process_id());
-        fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+        // try temp directory if it exists.
+        if (tmpdir != NULL && tmpdir[0] != '\0') {
+          jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log",
+                       tmpdir, os::file_separator(), os::current_process_id());
+          fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+        }
       }
 
       if (fd != -1) {
@@ -903,7 +938,7 @@
     OnError = NULL;
   }
 
-  static bool skip_bug_url = false;
+  static bool skip_bug_url = !should_report_bug(first_error->_id);
   if (!skip_bug_url) {
     skip_bug_url = true;
 
@@ -916,7 +951,8 @@
     static bool skip_os_abort = false;
     if (!skip_os_abort) {
       skip_os_abort = true;
-      os::abort();
+      bool dump_core = should_report_bug(first_error->_id);
+      os::abort(dump_core);
     }
 
     // if os::abort() doesn't abort, try os::die();
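
The hs_err fallback above now checks the os::get_temp_directory() result for
NULL or empty before composing the log path, instead of building a filename
from a missing directory. A standalone sketch of the guarded construction
(names are hypothetical):

    #include <cstdio>

    // Compose "<tmpdir>/hs_err_pid<pid>.log" only when a usable temp
    // directory was reported; otherwise let the caller try something else.
    bool make_log_path(char* buf, std::size_t n, const char* tmpdir, int pid) {
      if (tmpdir == NULL || tmpdir[0] == '\0') return false;
      std::snprintf(buf, n, "%s/hs_err_pid%d.log", tmpdir, pid);
      return true;
    }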
--- a/src/share/vm/utilities/vmError.hpp	Fri Jan 21 13:01:02 2011 -0800
+++ b/src/share/vm/utilities/vmError.hpp	Fri Jan 21 13:03:13 2011 -0800
@@ -87,10 +87,12 @@
   // accessor
   const char* message() const    { return _message; }
   const char* detail_msg() const { return _detail_msg; }
+  bool should_report_bug(unsigned int id) { return id != oom_error; }
 
 public:
   // Constructor for crashes
-  VMError(Thread* thread, int sig, address pc, void* siginfo, void* context);
+  VMError(Thread* thread, unsigned int sig, address pc, void* siginfo,
+          void* context);
   // Constructor for VM internal errors
   VMError(Thread* thread, const char* filename, int lineno,
           const char* message, const char * detail_msg);
--- a/test/Makefile	Fri Jan 21 13:01:02 2011 -0800
+++ b/test/Makefile	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1995, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6431242/Test.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/test/compiler/6431242/Test.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6857159/Test6857159.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/test/compiler/6857159/Test6857159.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6877254/Test.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/test/compiler/6877254/Test.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6895383/Test.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/test/compiler/6895383/Test.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6896727/Test.java	Fri Jan 21 13:01:02 2011 -0800
+++ b/test/compiler/6896727/Test.java	Fri Jan 21 13:03:13 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it